| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
from __future__ import absolute_import, division, print_function
import os
import sys
from collections import Counter
from typing import List, Text
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import tensorflow as tf
from six import string_types
from tqdm import tqdm
from odin.utils.crypto import md5_checksum
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
tf.random.set_seed(1)
np.random.seed(1)
HEADER = [
'hgnc_id', 'symbol', 'name', 'locus_group', 'locus_type', 'status',
'location', 'location_sortable', 'alias_symbol', 'alias_name',
'prev_symbol', 'prev_name', 'gene_family', 'gene_family_id',
'date_approved_reserved', 'date_symbol_changed', 'date_name_changed',
'date_modified', 'entrez_id', 'ensembl_gene_id', 'vega_id', 'ucsc_id',
'ena', 'refseq_accession', 'ccds_id', 'uniprot_ids', 'pubmed_id', 'mgd_id',
'rgd_id', 'lsdb', 'cosmic', 'omim_id', 'mirbase', 'homeodb', 'snornabase',
'bioparadigms_slc', 'orphanet', 'pseudogene.org', 'horde_id', 'merops',
'imgt', 'iuphar', 'kznf_gene_catalog', 'mamit-trnadb', 'cd', 'lncrnadb',
'enzyme_id', 'intermediate_filament_db', 'rna_central_ids', 'lncipedia',
'gtrnadb', 'agr'
]
FILTERED_HEADER = [
'ensembl_gene_id', 'name', 'symbol', 'alias_symbol', 'alias_name',
'locus_type', 'location', 'cd', 'uniprot_ids', 'enzyme_id'
]
PROTEIN_CODING_TEMPLATE = r"ftp://ftp.ebi.ac.uk/pub/databases/genenames/hgnc/tsv/locus_groups/protein-coding_gene_chr_{chro}.txt"
NON_CODING_TEMPLATE = r"ftp://ftp.ebi.ac.uk/pub/databases/genenames/hgnc/tsv/locus_groups/non-coding_RNA_chr_{chro}.txt"
CHROMOSOMES = list(range(1, 22 + 1)) + ['X', 'Y', 'mitochondria']
def _download_file(chromosome, url, path) -> pd.DataFrame:
name = os.path.basename(url)
filename = os.path.join(path, name)
if not os.path.exists(filename):
prog = tqdm(desc=f"Download {name}", unit="kB")
def progress(blocknum, bs, size):
if prog.total is None:
prog.total = size // 1024
prog.update(bs * blocknum // 1024 - prog.n)
urlretrieve(url=url, filename=filename, reporthook=progress)
prog.clear()
prog.close()
# read the tsv file
data = []
with open(filename, 'r') as f:
for line in f:
line = [i.replace('"', '') for i in line[:-1].split("\t")]
data.append(line)
data = np.asarray(data)
assert data.shape[1] == 52, \
f"Expect 52 columns, parsed data has shape:{data.shape}"
assert np.all(data[0] == HEADER), f"Unknown header: {data[0]}"
# convert to DataFrame
data =
| pd.DataFrame(data[1:], columns=data[0]) | pandas.DataFrame |
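A minimal standalone sketch of the completed call in this row: the parsed TSV array keeps its header as row 0, so pd.DataFrame(data[1:], columns=data[0]) promotes that row to column names. The gene ID and symbol below are illustrative values only.

import numpy as np
import pandas as pd

# Toy stand-in for the parsed HGNC array: first row is the header.
data = np.asarray([
    ['ensembl_gene_id', 'symbol'],
    ['ENSG00000139618', 'BRCA2'],
])
df = pd.DataFrame(data[1:], columns=data[0])   # header row becomes the columns
print(df.columns.tolist())                     # ['ensembl_gene_id', 'symbol']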
"""Example module in template package."""
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import SGDClassifier
from sklearn import svm
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from geo import *
__all__ = ['Tool']
class Tool(object):
"""Class to interact with a postcode database file."""
def __init__(self, postcode_file='', sample_labels='',
household_file=''):
"""
Parameters
----------
postcode_file : str, optional
Filename of a .csv file containing geographic location
data for postcodes.
sample_labels : str, optional
Filename of a .csv file containing sample data on property
values and flood risk labels.
household_file : str, optional
Filename of a .csv file containing information on households
by postcode.
"""
if postcode_file == '':
postcode_file = os.sep.join((os.path.dirname(__file__),
'resources',
'postcodes_unlabelled.csv'))
if sample_labels == '':
sample_labels = os.sep.join((os.path.dirname(__file__),
'resources',
'postcodes_sampled.csv'))
if household_file == '':
household_file = os.sep.join((os.path.dirname(__file__),
'resources',
'households_per_sector.csv'))
self.label = pd.read_csv(sample_labels)
self.postcodes = pd.read_csv(postcode_file)
self.house_label = pd.read_csv(household_file)
def get_easting_northing(self, postcodes):
"""Get a frame of OS eastings and northings from a collection
of input postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only OSGB36 easting and northing indexed
by the input postcodes. Invalid postcodes (i.e. not in the
input unlabelled postcodes file) return as NaN.
"""
if type(postcodes) is str:
postcodes = [postcodes]
postcode_df = self.postcodes
postcode_df = postcode_df.fillna('np.nan')
postcode_df_index = postcode_df.set_index('postcode')
df = pd.DataFrame(columns=(['sector','easting', 'northing','localAuthority']))
for i in range(len(postcodes)):
if postcodes[i] in postcode_df['postcode'].tolist():
df.loc[postcodes[i]] = postcode_df_index.loc[postcodes[i]]
else:
df.loc[postcodes[i]] = np.NaN
del df['sector']
del df['localAuthority']
return df
def get_lat_long(self, postcodes):
"""Get a frame containing GPS latitude and longitude information for a
collection of postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only WGS84 latitude and longitude pairs for
the input postcodes. Invalid postcodes (i.e. not in the
input unlabelled postcodes file) return as NaN.
"""
NE = self.get_easting_northing(postcodes)
east = NE['easting']
north = NE['northing']
lat_long = []
for i in range(len(NE)):
postcode = postcodes[i]
if np.isnan(east[i]):
lat = np.NaN
long = np.NaN
else:
a = get_gps_lat_long_from_easting_northing([east[i]],[north[i]],rads=False)
lat = int(a[0])
long = int(a[1])
lat_long.append([postcode,lat,long])
postcode_df = pd.DataFrame(lat_long, columns=('postcode','lat','lon'))
postcode_df = postcode_df.set_index('postcode')
return postcode_df
def get_easting_northing_sample(self, postcodes):
"""Get a frame of OS eastings and northings from a collection
of input postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only OSGB36 easting and northing indexed
by the input postcodes. Invalid postcodes (i.e. not in the
input sampled postcodes file) return as NaN.
"""
if type(postcodes) is str:
postcodes = [postcodes]
postcode_df = self.label
postcode_df = postcode_df.fillna('np.nan')
postcode_df_index = postcode_df.set_index('postcode')
df = pd.DataFrame(columns=(['sector','easting', 'northing','localAuthority','riskLabel','medianPrice']))
for i in range(len(postcodes)):
if postcodes[i] in postcode_df['postcode'].tolist():
df.loc[postcodes[i]] = postcode_df_index.loc[postcodes[i]]
else:
df.loc[postcodes[i]] = np.NaN
del df['sector']
del df['localAuthority']
del df['riskLabel']
del df['medianPrice']
return df
def get_lat_long_sample(self, postcodes):
"""Get a frame containing GPS latitude and longitude information for a
collection of postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only WGS84 latitude and longitude pairs for
the input postcodes. Invalid postcodes (i.e. not in the
input sampled postcodes file) return as NaN.
"""
NE = self.get_easting_northing_sample(postcodes)
east = NE['easting']
north = NE['northing']
lat_long = []
for i in range(len(NE)):
postcode = postcodes[i]
if np.isnan(east[i]):
lat = np.NaN
long = np.NaN
else:
a = get_gps_lat_long_from_easting_northing([east[i]],[north[i]],rads=False)
lat = int(a[0])
long = int(a[1])
lat_long.append([postcode,lat,long])
postcode_df = pd.DataFrame(lat_long, columns=('postcode','lat','lon'))
postcode_df = postcode_df.set_index('postcode')
return postcode_df
@staticmethod
def get_flood_class_methods():
"""
Get a dictionary of available flood probability classification methods.
Returns
-------
dict
Dictionary mapping classification method names (which have
no innate meaning) onto an identifier to be passed to the
get_flood_class method.
"""
return {'random_forest': 0, "RF_balanced": 1, "SGD_Classifier": 2, "knn": 3, 'GBC': 4}
def get_flood_class(self, postcodes, method=0):
"""
Generate series predicting flood probability classification
for a collection of postcodes.
Parameters
----------
postcodes : sequence of strs
Sequence of postcodes.
method : int (optional)
optionally specify (via a value in
self.get_flood_class_methods) the classification
method to be used.
Returns
-------
pandas.Series
Series of flood risk classification labels indexed by postcodes.
"""
# print("asdas", self.label)
X = self.label[["easting","northing"]]
y = self.label['riskLabel']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42) # Holdout
northing_eastings = self.get_easting_northing(postcodes)
# print(northing_eastings, 'asdsa')
# northing_eastings = X.iloc[0:2]
# print(self.get_flood_class_methods(), 'asd')
if method == self.get_flood_class_methods()["random_forest"]:
model = RandomForestClassifier(criterion = 'gini', max_features = 'log2',
class_weight = {1: 10, 2: 10, 3: 10, 4: 1, 5: 15, 6: 15, 7: 10, 8: 150, 9: 300, 10: 300})
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["RF_balanced"]:
over = SMOTE(sampling_strategy='not majority', random_state=41)
under = RandomUnderSampler(sampling_strategy={1:500}, random_state=43)
steps = [('u', under)] #, ('o', over)
pipeline = Pipeline(steps=steps)
X_train, y_train = pipeline.fit_resample(X_train, y_train)
model = RandomForestClassifier(criterion = 'gini', max_features = 'log2',
class_weight = {1: 10, 2: 10, 3: 10, 4: 1, 5: 15, 6: 15, 7: 10, 8: 150, 9: 300, 10: 300})
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["SGD_Classifier"]:
model = SGDClassifier(loss='hinge', penalty='l1', alpha=1/20)
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["knn"]:
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["GBC"]:
model = GradientBoostingClassifier(random_state=1)
model.fit(X_train, y_train)
y_new = model.predict(northing_eastings)
return pd.Series(data=y_new,
index=np.asarray(postcodes),
name='riskLabel')
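# Hypothetical usage sketch (not part of the original class), assuming the
# bundled resource CSVs are present and 'SW7 2AZ' stands in for any postcode
# from the unlabelled file:
#   tool = Tool()
#   labels = tool.get_flood_class(['SW7 2AZ'],
#                                 method=tool.get_flood_class_methods()['knn'])
#   print(labels)  # pandas Series of risk labels indexed by postcode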
@staticmethod
def get_house_price_methods():
"""
Get a dictionary of available house price regression methods.
Returns
-------
dict
Dictionary mapping regression method names (which have
no innate meaning) onto an identifier to be passed to the
get_median_house_price_estimate method.
"""
return {'all_england_median': 0, 'another_1': 1, 'Decision_tree_regressor': 2}
def get_median_house_price_estimate(self, postcodes, method=2):
"""
Generate series predicting median house price for a collection
of postcodes.
Parameters
----------
postcodes : sequence of strs
Sequence of postcodes.
method : int (optional)
optionally specify (via a value in
self.get_house_price_methods) the regression
method to be used.
Returns
-------
pandas.Series
Series of median house price estimates indexed by postcodes.
"""
df = self.label
df['outwardDistrict'] = df['postcode'].apply(lambda x: x.split(' ')[0])
df['sec_num']=df['sector'].apply(lambda x: x.split(' ')[1][0])
if method == 0:
return pd.Series(data=np.full(len(postcodes), 245000.0),
index=np.asarray(postcodes),
name='medianPrice')
elif method == 1: # another one
median_price = []
for code in postcodes:
if code in df['postcode'].values:
median_price.append(df[df['postcode']==code]['medianPrice'].values[0])
elif code.split(' ')[0]+' '+code.split(' ')[1][0] in df['sector'].values:
sec = code.split(' ')[0]+' '+code.split(' ')[1][0]
median_price.append(df[df['sector'] == sec]['medianPrice'].mean())
elif code.split(' ')[0] in df['outwardDistrict'].values:
district = df[df['outwardDistrict'] == code.split(' ')[0]]
X_test = code.split(' ')[1][0]
KNN_model = KNeighborsRegressor(n_neighbors=1,weights='distance', n_jobs=-1)
X = district[['sec_num']]
y = district['medianPrice']
KNN_model.fit(X,y)
y_pred = KNN_model.predict(pd.DataFrame([X_test], columns=['sec_num']))
median_price.append(y_pred[0])
else:
median_price.append(np.nan)
return pd.Series(data= median_price, index = postcodes, name='another_one')
elif method == 2: # Decision tree regressor
median_price = []
for code in postcodes:
if code in df['postcode'].values:
median_price.append(df[df['postcode']==code]['medianPrice'].values[0])
elif code.split(' ')[0]+' '+code.split(' ')[1][0] in df['sector'].values:
sec = code.split(' ')[0]+' '+code.split(' ')[1][0]
median_price.append(df[df['sector'] == sec]['medianPrice'].mean())
elif code.split(' ')[0] in df['outwardDistrict'].values:
district = df[df['outwardDistrict'] == code.split(' ')[0]]
X_test = code.split(' ')[1][0] # sector
dtree = DecisionTreeRegressor(max_depth=5, min_samples_leaf=0.13, random_state=3)
X = district[['sec_num']]
y = district['medianPrice']
dtree.fit(X, y)
y_pred = dtree.predict(
| pd.DataFrame([X_test], columns=['sec_num']) | pandas.DataFrame |
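A condensed, hypothetical sketch of the pattern this row completes: wrapping the single test value in a one-row DataFrame with the same column name used at fit time ('sec_num') keeps the feature name consistent for predict. The values below are made up.

import pandas as pd
from sklearn.tree import DecisionTreeRegressor

district = pd.DataFrame({'sec_num': [1, 2, 3, 4],
                         'medianPrice': [250000.0, 300000.0, 280000.0, 260000.0]})
dtree = DecisionTreeRegressor(max_depth=5, min_samples_leaf=0.13, random_state=3)
dtree.fit(district[['sec_num']], district['medianPrice'])
y_pred = dtree.predict(pd.DataFrame([2], columns=['sec_num']))  # one-row frame, same column name
print(y_pred[0])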
# coding: utf-8
# In[1]:
import pandas as pd
tweets = pd.read_csv("tweets.csv")
tweets.head()
# In[3]:
def get_candidate(row):
candidates = []
text = row["text"].lower()
if "clinton" in text or "hillary" in text:
candidates.append("clinton")
if "trump" in text or "donald" in text:
candidates.append("trump")
if "sanders" in text or "bernie" in text:
candidates.append("sanders")
return ",".join(candidates)
tweets["candidate"] = tweets.apply(get_candidate,axis=1)
tweets.head()
# In[14]:
import matplotlib.pyplot as plt
import numpy as np
counts = tweets["candidate"].value_counts()
plt.bar(range(len(counts)), counts)
plt.show()
# In[16]:
# User age statistics
from datetime import datetime
tweets["created"] =
| pd.to_datetime(tweets["created"]) | pandas.to_datetime |
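A small, self-contained illustration of the completion in this row; the timestamps are invented stand-ins for the tweets.csv 'created' column.

import pandas as pd

tweets = pd.DataFrame({'created': ['2016-04-17 20:01:35', '2016-04-17 20:02:10']})
tweets['created'] = pd.to_datetime(tweets['created'])   # object dtype -> datetime64[ns]
print(tweets['created'].dt.hour)                        # datetime accessor now usable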
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: <NAME>
github: dapolak
"""
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from slugdetection.Slug_Detection import Slug_Detection
import unittest
class Test_Slug_Detection(unittest.TestCase):
"""
Unitest class for the Slug Detection class
"""
def test_create_class(self, spark_data):
"""
Unit test for class creation
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
assert hasattr(test_class, "well_df"), "Assert well_df attribute is created"
assert len(test_class.well_df.head(1)) != 0, \
"well_df attribute not empty" # Pyspark has no clear empty attribute
def test_jump(self, spark_data):
"""
Unit test for jump method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
assert 'count_id' in test_class.pd_df.columns, "Assert new count_id column was created"
assert test_class.pd_df['count_id'].nunique() >= 3, \
"For this example, assert that there are three continuous sets of data"
def test_clean_short_sub(self, spark_data):
"""
Unit test for clean_short_sub method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.jump()
a = len(test_class.pd_df) # Store length of pd_df data frame
test_class.clean_short_sub(min_df_size=200) # Apply clean_short_sub method
b = len(test_class.pd_df) # Store length of pd_df data frame
assert a > b, "For this example, the post clean_short_sub pd_df attribute should be shorter"
def test_sub_data(self, spark_data):
"""
Unit test for sub_data method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="12-SEP-16 09:09",
end="18-SEP-16 09:09") # known interval that has 3 section of data over 99% choke
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data(min_df_size=200)
assert hasattr(test_class, "sub_df_dict"), "New attribute must have been created"
a = test_class.pd_df["count_id"].nunique()
assert a == len(test_class.sub_df_dict), "Number of unique count ids must be the same as number of data " \
"frames in sub_df_dict dictionary"
a = test_class.sub_df_dict[0] # Get first element of the dictionary
assert isinstance(a, pd.DataFrame), "sub_df_dict elements are pandas data frames"
for f in test_class.features:
assert f in a.columns, "data frame must contain all features"
def test_slug_check(self, spark_data):
"""
Unit test for slug_check method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="18-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
test_class.sub_data()
## Test 1 : Test that slug_check returns right value
##
# Create fake dataframe
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(1000)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add sine wave as WHP data
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks for fake slugs
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert len(first) == 1, "First slug index list should only contain one value in this example"
## Test 2 : Test that slug_check returns right value
##
# Create fake data frame
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(2300)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = (np.sin(x) * 3) + 10 # Create sin wave array (slug-like)
whp_list = np.append(whp_list, [10 for i in range(300)]) # Add flat flow to simulate normal flow
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add more slugs
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str) # Create data frame with timestamp
fake_df["ts"] = pd.to_datetime(fake_df["ts"]) # Ensure timestamp are datetime type
fake_df["WH_P"] = whp_list # Add fake whp data
slug_idx = pd.Series(whp_list)[whp_list > 12.90].index.tolist() # Create list of slug peaks
test_class.sub_df_dict = {
1: fake_df
} # Override sub_df_dict attribute with fake data frame
first = test_class.slug_check(slug_idx, 1) # Get results from slug_check method
assert first, "First slug index list should not be empty"
assert len(first) == 2, "First slug index list should only contain two values in this example"
assert first[1] == 1305, "In this example, the second first slug of the data set occurs at minutes = 1305"
def test_label_slugs(self, spark_data):
"""
Unit test for label_slugs method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="30-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
try:
f, s = test_class.label_slugs()
print("Sub df dict attribute has not been created")
raise ValueError
except AssertionError:
pass
test_class.sub_data() # Create sub df dict
# create fake data set
datetime_format = '%d-%b-%y %H:%M'
base = datetime.strptime("01-JAN-16 09:09", datetime_format)
date_list = [[base + timedelta(minutes=x)] for x in range(1000)] # Create timestamps, one minute apart
x = np.linspace(0, 100 * np.pi, 1000)
whp_list = (np.sin(x) * 3) + 10 # create sin wave
fake_df = pd.DataFrame(data=date_list, columns=["ts"], dtype=str)
fake_df["ts"] = pd.to_datetime(fake_df["ts"])
fake_df["WH_P"] = whp_list
# override
test_class.sub_df_dict = {
1: fake_df,
2: pd.DataFrame(data=[[0, 0], [0, 0]], columns=["ts", "WH_P"])
}
# This should create
f, s = test_class.label_slugs()
assert s, "Assert slug index list is not empty"
assert f, "Assert first slug index list not empty"
assert len(s[0]) == 49, "In this example, there should be 49 slug peaks"
assert len(s) == 2, "In this example, there should be two lists of slug peaks"
def test_format_data(self, spark_data):
"""
Unit test for format_data method
Parameters
----------
spark_data : Spark data frame
well data frame
"""
# Standard Data Engineering steps
test_class = Slug_Detection(spark_data)
test_class.timeframe(start="18-SEP-16 01:09", end="18-SEP-16 09:09") # example interval
test_class.data_range(verbose=False)
test_class.clean_choke(method="99")
sd_df = test_class.df_toPandas()
try:
f, s = test_class.label_slugs()
print("Sub df dict attribute has not been created")
raise ValueError
except AssertionError:
pass
test_class.sub_data() # Create sub df dict
## Example 1
##
# create fake data set
datetime_format = '%d-%b-%y %H:%M' # datetime date format
base = datetime.strptime("01-JAN-16 09:09", datetime_format) # Create datetime type timestamp
date_list = [[base + timedelta(minutes=x)] for x in range(2600)] # Create list of timestamps
x = np.linspace(0, 100 * np.pi, 1000) # Get evenly spaced x array
whp_list = np.array([10 for i in range(300)]) # Create whp list with normal flow behaviour
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add sin wave array (slug-like)
whp_list = np.append(whp_list, [10 for i in range(300)]) # Add flat flow to simulate normal flow
whp_list = np.append(whp_list, (np.sin(x) * 3) + 10) # Add more slugs
fake_df =
| pd.DataFrame(data=date_list, columns=["ts"], dtype=str) | pandas.DataFrame |
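The tests above repeatedly build a synthetic slug-like pressure trace; a condensed standalone version of that recipe (same steps as the source, trimmed) is:

import numpy as np
import pandas as pd
from datetime import datetime, timedelta

base = datetime.strptime('01-JAN-16 09:09', '%d-%b-%y %H:%M')
date_list = [[base + timedelta(minutes=x)] for x in range(1000)]    # one-minute steps
whp_list = (np.sin(np.linspace(0, 100 * np.pi, 1000)) * 3) + 10     # sine wave, slug-like
fake_df = pd.DataFrame(data=date_list, columns=['ts'], dtype=str)
fake_df['ts'] = pd.to_datetime(fake_df['ts'])                       # back to datetime64
fake_df['WH_P'] = whp_list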
#Rule - To replace multiple spaces in the content of the text, voice and voice_only columns
def multiple_spaces_in_txt(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Multiple_Spaces_in_txt"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
regex = r"\s{2,}"
def check_multiple_space(string):
if(re.search(regex,string)):
return True
else:
return False
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
for column_name in columns_to_apply:
column_value=row[column_name]
if(
| pd.notnull(row[column_name]) | pandas.notnull |
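A minimal sketch of the check this rule performs, with invented cell values: pd.notnull guards against empty cells before the regex for two-or-more consecutive whitespace characters is applied.

import re
import pandas as pd

regex = r"\s{2,}"
df = pd.DataFrame({'text': ['single space ok', 'two  spaces here', None]})
for idx, row in df.iterrows():
    if pd.notnull(row['text']) and re.search(regex, row['text']):
        print('multiple spaces at row', idx)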
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:52:00 2020
@author: <NAME>
Ewp
Takes a little while to run.
This code is pretty average (poorly named variables, reuse of dat, and different names for the moorings).
All of the values are calculated on the fly and printed; nothing is saved anywhere.
Quite inefficient, but provided as is.
Recommended to turn warnings off when saving the data and putting it in text.
Could have turned this into a function. Refactored where possible, but this script is provided as is.
Requirements:
processed/combined_dataset/month_data_exports.nc
processed/flux/pco2grams.nc
Produces:
figs/Figure5a_ENSO_seasonality.png
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
from matplotlib.dates import MonthLocator, DateFormatter
from matplotlib.ticker import FuncFormatter
xl0=0.0
yl0=0.18
xl1=0.0
yl1=0.18
xl2=-0.09
yl2=0
lanina=pd.read_csv('processed/indexes/la_nina_events.csv')
cp_nino=pd.read_csv('processed/indexes/cp_events.csv')
ep_nino=pd.read_csv('processed/indexes/ep_events.csv')
fp='processed/combined_dataset/month_data_exports.nc'
info=xr.open_mfdataset(fp).sel(Mooring=195).to_dataframe()
#Process EP, CP and Nino events.
nina=pd.DataFrame()
ep=pd.DataFrame()
cp=pd.DataFrame()
for i in lanina.iterrows(): nina=nina.append(info[slice(i[1].start,i[1].end)])
for i in ep_nino.iterrows(): ep=ep.append(info[slice(i[1].start,i[1].end)])
for i in cp_nino.iterrows(): cp=cp.append(info[slice(i[1].start,i[1].end)])
nina_dates=nina.index
ep_dates=ep.index[4:]
cp_dates=cp.index
# %% Load Useful files
seamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.
seamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')
#landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI_SOM-FFN_v2018.nc'
landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'
landschutzer=xr.open_dataset(landsch_fp)
landschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon') #EPIC 1 line fix for the dateline problem.
land_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))
land_pac_all=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))
land_pac=land_pac.fgco2_smoothed
atmco2=land_pac_all.atm_co2
dco2=land_pac_all.dco2
pco2=land_pac_all.spco2_smoothed
kw=land_pac_all.kw
f_ratios=xr.open_mfdataset('processed/flux/fratios.nc')
ratio=f_ratios.laws2011a#laws2000#laws2000,laws2011a,laws2011b,henson2011
npp1=xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc')
avg_npp=(npp1.avg_npp/1000)*ratio
land=moles_to_carbon(land_pac)/365 #LANDSCHUTZ
land['time']=land.time.astype('datetime64[M]')
diff=land-avg_npp
diff1=diff.where((diff<0.1)|(diff<-0.1),np.nan)
sst = xr.open_dataset('datasets/sst/sst.mnmean.nc')
sst= sst.assign_coords(lon=(sst.lon % 360)).roll(lon=(sst.dims['lon']),roll_coords=False).sortby('lon') #EPIC 1 line fix for the dateline problem.
sst=sst.sel(lon=slice(120,290),lat=slice(20,-20)).sst
sst=sst.where(seamask.seamask==1)
#startday=np.datetime64('2000-01-01')
#endday=np.datetime64('2019-12-01')
#wu=xr.open_dataset('datasets/uwnd.mon.mean.nc').sel(level=1000,lat=0,lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd
#wv=xr.open_dataset('datasets/vwnd.mon.mean.nc').sel(level=1000,lat=0,lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd
wu=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd
wv=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd
ws_ncep2=np.sqrt((wu**2)+(wv**2))
#Check line 164 depending on whether NCEP2 or windspeed is used
ws=xr.open_dataarray('datasets/CCMP_windspeed.nc')
#wind=uw.sel(lat=)
precip= xr.open_dataset('datasets/precip.mon.mean.enhanced.nc').sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).precip
newprod=avg_npp.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
co2=land.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
pco2=pco2.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
pco2['time']=pco2.time.astype('datetime64[M]')
kw['time']=kw.time.astype('datetime64[M]')
dco2['time']=dco2.time.astype('datetime64[M]')
#pco2=pco2_intrp
kw1=kw.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
sst=sst.sel(lat=slice(15,-15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
chl=xr.open_dataset('processed/flux/tpca.nc').tpca#'sw_month.nc')
chl['time']=chl.time.astype('datetime64[M]')
# %%
#Test windspeed
minlat=-2
maxlat=2
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso=enso.iloc[3:] #Just so Both EMI and MEI start in 1981-01-01
enso_flat=enso.stack()
enso_dates=pd.date_range('1982','2020-07-01',freq='M')-
| pd.offsets.MonthBegin(1) | pandas.offsets.MonthBegin |
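The completion here aligns month-end stamps to month starts; a quick illustration (dates chosen arbitrarily): pd.date_range with freq='M' yields month-end dates, and subtracting pd.offsets.MonthBegin(1) rolls each back to the first of its month.

import pandas as pd

month_ends = pd.date_range('1982-01-01', '1982-04-01', freq='M')
month_starts = month_ends - pd.offsets.MonthBegin(1)
print(month_starts)   # 1982-01-01, 1982-02-01, 1982-03-01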
import plotly.graph_objs as go
from plotly.colors import n_colors
# to handle data retrieval
import urllib3
from urllib3 import request
# to handle certificate verification
import certifi
# to manage json data
import json
# for pandas dataframes
import pandas as pd
base_url = 'https://www.opendata.nhs.scot/api/3/action/datastore_search_sql?sql='
#Neighbourhoods
nbh_id = '8906de12-f413-4b3f-95a0-11ed15e61773'
#Daily Cases total
daily_id = '287fc645-4352-4477-9c8c-55bc054b7e76'
# Cases by local authority
by_hb_id = '427f9a25-db22-4014-a3bc-893b68243055'
# Hospital Onset
hosp_id = '5acbccb1-e9d6-4ab2-a7ac-f3e4d378e7ec'
def get_api(sql_query):
base_url = 'https://www.opendata.nhs.scot/api/3/action/datastore_search_sql?sql='
# handle certificate verification and SSL warnings
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
final_url = base_url + sql_query
r = http.request('GET', final_url)
# decode json data into a dict object
data = json.loads(r.data.decode('utf-8'))
df = pd.json_normalize(data['result']['records'])
return df
def latest_date(resource_id):
date_df = get_api(
f'''
SELECT "Date"
FROM "{resource_id}"
ORDER BY "Date" DESC
LIMIT 1
''')
latest_date = date_df.iloc[0,0]
return latest_date
def month_before(resource_id, n_days):
date_df = get_api(
f'''
SELECT DISTINCT "Date"
FROM "{resource_id}"
ORDER BY "Date" DESC
LIMIT {n_days}
''')
_n_days = date_df.iloc[(n_days-1),0]
return _n_days
#Daily Cases over time
def daily():
'''
returns most recent daily case numbers
from the Scottish Government
'''
sql_daily = f'''
SELECT "Date", "DailyCases"
FROM "{daily_id}"
ORDER BY "Date" ASC
'''
daily_df = get_api(sql_daily)
daily_df.columns = ['DateStr', 'DailyCases']
daily_df['Date'] = daily_df['DateStr'].apply(lambda x: pd.to_datetime(str(x),\
format='%Y%m%d'))
daily_df = daily_df.drop('DateStr', axis = 1)
daily_df['AvgCases'] = daily_df['DailyCases'].rolling(14,\
win_type='triang', min_periods=1).mean()
return daily_df
#Top Neighbourhoods Daily
def top_nbh_daily():
'''
Returns top 50 Neighbourhoods with new cases in Scotland
according to Scottish Government data
'''
day = latest_date(nbh_id)
sql_nbh = f'''
SELECT "Date", "IntZoneName", "Positive7Day"
FROM "{nbh_id}"
WHERE "Date" = {day}
'''
nbh_df = get_api(sql_nbh)
nbh_df['Positive7Day'] = pd.to_numeric(nbh_df['Positive7Day'])
nbh_df = nbh_df.sort_values(by=['Positive7Day'], ascending = False)\
.reset_index(drop = True)
nbh_df.columns = ['DateStr', 'Neighbourhood', 'Weekly Positive']
nbh_df['Date'] = nbh_df['DateStr'].apply(lambda x: pd.to_datetime(str(x),\
format='%Y%m%d'))
nbh_df = nbh_df.drop('DateStr', axis = 1)
return nbh_df.iloc[:10,:]
#Daily Cases by Local Authority
def top_la_daily():
'''
Returns Local Authority new Covid case numbers
'''
day = latest_date(by_hb_id)
sql_hb = f'''
SELECT "Date", "CAName", "DailyPositive"
FROM "{by_hb_id}"
WHERE "Date" = {day}
'''
hb_df = get_api(sql_hb)
hb_df['DailyPositive'] = pd.to_numeric(hb_df['DailyPositive'])
hb_df = hb_df.sort_values(by=['DailyPositive'], ascending = False)\
.reset_index(drop = True)
hb_df.columns = ['DateStr','Daily Positive', 'Local Authority']
hb_df['Date'] = hb_df['DateStr'].apply(lambda x: pd.to_datetime(str(x),\
format='%Y%m%d'))
return hb_df.drop('DateStr', axis = 1)
def snap_shot(n_days):
'''
Returns a snapshot of case numbers by Local Authority over time
'''
day = latest_date(by_hb_id)
n_days = 5
_n_days = month_before(by_hb_id, n_days)
sql_hb_m = f'''
SELECT "Date", "CAName", "PositivePercentage"
FROM "{by_hb_id}"
WHERE "Date" BETWEEN {_n_days} AND {day}
ORDER BY "Date" DESC
'''
hb_m_df = get_api(sql_hb_m)
hb_m_df['PositivePercentage'] = pd.to_numeric(hb_m_df['PositivePercentage'])
hb_m_df.columns = ['DateStr', 'PositivePercentage', 'Local Authority']
hb_m_df['Date'] = hb_m_df['DateStr'].apply(lambda x: pd.to_datetime(str(x),\
format='%Y%m%d'))
hb_m_df = hb_m_df.drop('DateStr', axis = 1)
return hb_m_df
#Creates Figures
def return_figures():
"""Creates four plotly visualizations
Args:
None
Returns:
list (dict): list containing the four plotly visualizations
"""
#New Covid cases over time for Scotland
df_one = daily()
df_one = df_one.set_index('Date').stack().reset_index()
df_one.columns = ['Date', 'casetype', 'Cases']
df_one = df_one.iloc[-100:,:]
graph_one = []
ct_list = df_one.casetype.unique().tolist()
for ct in ct_list:
x_val = df_one[df_one.casetype == ct]['Date'].tolist()
y_val = df_one[df_one.casetype == ct]['Cases'].tolist()
graph_one.append(go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = ct
),
)
layout_one = dict(title = 'Daily COVID-19 Cases in Scotland',
xaxis = dict(title = 'Date'),
yaxis = dict(title = 'New Cases'),
font=dict( color="black"
)
)
#Top ten table of highest weekly covid Neighbourhoods
df_two = top_nbh_daily()
df_two = df_two.drop(['Date'], axis= 1)
colors = n_colors('rgb(200, 0, 0)', 'rgb(255, 200, 200)', 10, colortype='rgb')
df_two['c'] = colors
graph_two = []
graph_two.append(go.Table(
header=dict(
values=["<b>Neighbourhood<b>", "<b>Weekly Positive<b>"],
line_color='darkslategrey', fill_color='white',
align='center',
font=dict(color='black', size=12)
),
cells=dict(values=[df_two['Neighbourhood'], df_two['Weekly Positive']],
line_color=[df_two['c']],
fill_color=[df_two['c']],
align='left',
font=dict(color='white', size=12)
)
)
)
#New Daily Cases by Local Authority
df_three = top_la_daily()
df_three_s = df_three.sort_values(by = 'Daily Positive')
graph_three = []
hb_list = df_three_s['Local Authority'].tolist()
for hb in hb_list:
x_val = df_three_s[df_three_s['Local Authority'] == hb]['Local Authority'].tolist()
y_val = df_three_s[df_three_s['Local Authority'] == hb]['Daily Positive'].tolist()
graph_three.append(go.Bar(y = x_val,x = y_val, name = hb,orientation='h'))
layout_three = dict(
title = 'Daily Cases',
xaxis = dict(
title = 'New Cases'
),
yaxis = dict(
autotick=False
),
autosize=False,
width=500,
height=900,
showlegend = False,
color = "y_val",
margin=dict(
l=150,r=50,b=50,t=75,pad=4
)
)
top_five = df_three['Local Authority'].iloc[0:5].tolist()
n_days = 10
df_four = snap_shot(n_days)
graph_four = []
for hb in top_five:
x_val = df_four[df_four['Local Authority'] == hb]['Date'].tolist()
y_val = df_four[df_four['Local Authority'] == hb]['PositivePercentage'].tolist()
graph_four.append(go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = hb,
)
)
layout_four = dict(title = 'Percent of Tests Positive',
xaxis = dict(title = 'Date'),
yaxis = dict(title = 'Positive Test Percent(%)'),
)
top_five_data = df_three.iloc[0:5,:]
everyone = df_three.iloc[:,0].sum()
top_5_tot = top_five_data.iloc[:,0].sum()
other = everyone - top_5_tot
other_date = top_five_data['Date'].iloc[0]
other_data = {'Daily Positive':[other],'Local Authority':'Other', 'Date':other_date}
other_pds = pd.DataFrame(data = other_data)
top_five_data =
| pd.concat([top_five_data, other_pds]) | pandas.concat |
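A small sketch of the row-stacking step this completion performs, with made-up counts and two council names used purely as examples:

import pandas as pd

top_five_data = pd.DataFrame({'Daily Positive': [120, 95],
                              'Local Authority': ['Glasgow City', 'City of Edinburgh']})
other_pds = pd.DataFrame({'Daily Positive': [300], 'Local Authority': ['Other']})
combined = pd.concat([top_five_data, other_pds], ignore_index=True)  # stack rows, renumber index
print(combined)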
######################################################################################88
import numpy as np
from scipy import stats
from sklearn.metrics import pairwise_distances
import scipy
from scipy.stats import hypergeom, mannwhitneyu, linregress, norm, ttest_ind
#from scipy.sparse import issparse, csr_matrix
import scipy.sparse as sps
from statsmodels.stats.multitest import multipletests
from collections import Counter, OrderedDict
import scanpy as sc
from . import preprocess
from . import tcr_scoring
from . import util
from .tcrdist.all_genes import all_genes
from .tags import *
import sys
import pandas as pd
from sys import exit
import time #debugging
MIN_CONGA_SCORE = 1e-100
def _find_neighbor_neighbor_interactions(
adata,
nbrs_gex,
nbrs_tcr,
agroups,
bgroups,
pval_threshold,
counts_correction=0, # if there are a lot of maits, for example; this is the number of them
correct_overlaps_for_groups=True,
scale_pvals_by_num_clones=True,
verbose=False
):
''' This is a helper function used in graph-vs-graph analysis
it runs the GEX KNN graph vs TCR KNN graph comparison
Returns a pandas dataframe with results
of all clones with nbr-nbr overlaps having pvalues <= pval_threshold
AND a numpy array of the adjusted_pvals
'''
preprocess.add_mait_info_to_adata_obs(adata) # for annotation of overlaps
is_mait = adata.obs['is_invariant']
num_clones = len(nbrs_gex)
pval_rescale = num_clones if scale_pvals_by_num_clones else 1.0
results = []
adjusted_pvalues = [pval_rescale]*num_clones # initialize to big value
for ii in range(num_clones):
is_ii_group = (agroups==agroups[ii]) | (bgroups==bgroups[ii])
assert is_ii_group[ii]
actual_num_clones = num_clones - np.sum( is_ii_group ) - counts_correction
ii_nbrs_gex = nbrs_gex[ii] # might be slice of array, or list entry if use_sym_nbrs
ii_nbrs_tcr = nbrs_tcr[ii]
ii_nbrs_gex_set = frozenset( ii_nbrs_gex)
assert ii not in ii_nbrs_gex_set
num_neighbors_gex = len(ii_nbrs_gex)
num_neighbors_tcr = len(ii_nbrs_tcr)
overlap = sum( 1 for x in ii_nbrs_tcr if x in ii_nbrs_gex_set )
expected_overlap = float( num_neighbors_gex*num_neighbors_tcr )/actual_num_clones
if overlap and overlap > expected_overlap:
nbr_pval = pval_rescale * hypergeom.sf( overlap-1, actual_num_clones, num_neighbors_gex, num_neighbors_tcr)
else:
nbr_pval = pval_rescale
if verbose:
print('nbr_overlap:', ii, overlap, nbr_pval)
adjusted_pvalues[ii] = nbr_pval
if nbr_pval > pval_threshold:
continue ## NOTE
double_nbrs = [ x for x in ii_nbrs_tcr if x in ii_nbrs_gex_set ]
assert overlap == len(double_nbrs)
if verbose:
print('nbr_overlap_nbrs:', ii, overlap, nbr_pval, double_nbrs)
overlap_corrected = overlap
if correct_overlaps_for_groups:
# this is heuristic
overlap_corrected = min(len(set(agroups[double_nbrs])),
len(set(bgroups[double_nbrs])) )
if overlap_corrected < overlap:
delta = overlap-overlap_corrected
nbr_pval = pval_rescale * hypergeom.sf(
overlap_corrected-1, actual_num_clones,
num_neighbors_gex-delta, num_neighbors_tcr-delta)
adjusted_pvalues[ii] = nbr_pval # update
if nbr_pval > pval_threshold:
continue ## NOTE
nbr_pval = max(nbr_pval, MIN_CONGA_SCORE) # no 0s
results.append(dict(conga_score=nbr_pval,
num_neighbors_gex=num_neighbors_gex,
num_neighbors_tcr=num_neighbors_tcr,
overlap=overlap,
overlap_corrected=overlap_corrected,
mait_fraction=np.sum(is_mait[double_nbrs])/overlap,
clone_index=ii ))#double_nbrs ] )
adjusted_pvalues = np.maximum(np.array(adjusted_pvalues), MIN_CONGA_SCORE)
return pd.DataFrame(results), adjusted_pvalues
def _find_neighbor_cluster_interactions(
adata,
nbrs,
clusters, # for example, or could be swapped: tcr/gex
agroups,
bgroups,
pval_threshold,
counts_correction=0, # if there are a lot of maits, for example; this is the number of them
correct_overlaps_for_groups=True,
scale_pvals_by_num_clones=True,
):
''' This is a helper function used in graph-vs-graph analysis
It computes KNN graph vs cluster graph overlaps
Returns a pandas dataframe with results
of all clones with nbr-cluster overlaps having pvalues <= pval_threshold
AND a numpy array of the adjusted_pvals
'''
preprocess.add_mait_info_to_adata_obs(adata) # for annotation of overlaps
is_mait = adata.obs['is_invariant']
num_clones = len(nbrs)
pval_rescale = num_clones if scale_pvals_by_num_clones else 1.0
results = []
adjusted_pvalues = [pval_rescale]*num_clones # initialize to big pval
for ii in range(num_clones):
is_ii_group = (agroups==agroups[ii]) | (bgroups==bgroups[ii])
assert is_ii_group[ii]
actual_num_clones = num_clones - np.sum(is_ii_group) - counts_correction
ii_nbrs = nbrs[ii]
ii_cluster = clusters[ii]
ii_cluster_clustersize = (np.sum(clusters==ii_cluster) -
np.sum((clusters==ii_cluster)&(is_ii_group)))
num_neighbors = ii_nbrs.shape[0]
overlap = np.sum( clusters[ii_nbrs] == ii_cluster )
expected = float(ii_cluster_clustersize*num_neighbors)/actual_num_clones
if overlap and overlap>expected:
nbr_pval = pval_rescale * hypergeom.sf(
overlap-1, actual_num_clones, num_neighbors,
ii_cluster_clustersize )
else:
nbr_pval = pval_rescale
adjusted_pvalues[ii] = nbr_pval
if nbr_pval > pval_threshold:
continue ## NOTE
same_cluster_nbrs = ii_nbrs[ clusters[ii_nbrs] == ii_cluster ]
assert len(same_cluster_nbrs)== overlap
overlap_corrected = overlap
if correct_overlaps_for_groups:
overlap_corrected = min(len(set(agroups[same_cluster_nbrs])),
len(set(bgroups[same_cluster_nbrs])))
if overlap_corrected < overlap:
delta = overlap-overlap_corrected
nbr_pval = pval_rescale * hypergeom.sf(
overlap_corrected-1, actual_num_clones, num_neighbors-delta,
ii_cluster_clustersize-delta )
adjusted_pvalues[ii] = nbr_pval
if nbr_pval > pval_threshold:
continue
mait_fraction=np.sum(is_mait[same_cluster_nbrs])/overlap
nbr_pval = max(nbr_pval, MIN_CONGA_SCORE) # no 0s
results.append(dict(conga_score=nbr_pval,
num_neighbors=num_neighbors,
cluster_size=ii_cluster_clustersize,
overlap=overlap,
overlap_corrected=overlap_corrected,
mait_fraction=mait_fraction,
clone_index=ii ))#double_nbrs ] )
adjusted_pvalues = np.maximum(np.array(adjusted_pvalues), MIN_CONGA_SCORE)
return pd.DataFrame(results), adjusted_pvalues
def check_nbr_graphs_indegree_bias(all_nbrs):
''' this routine looks at bias in the number of neighbor edges
going *into* each vertex (clonotype). By definition, each vertex
will have the same number going out, but not necessarily the same
number going in. This is especially true of the GEX graph.
Generally there is less bias in the TCR graph. If both graphs were biased
in the same direction (toward the same nodes), this could create
spurious graph-vs-graph signal.
So we look here at the correlation between the two biases.
all_nbrs is a dict mapping from nbr_frac to (gex_nbrs, tcr_nbrs)
setup by preprocess.calc_nbrs
'''
for nbr_frac in all_nbrs:
nbrs_gex, nbrs_tcr = all_nbrs[nbr_frac]
num_clones = nbrs_gex.shape[0]
# look at the in-degree distribution
expected_indegree = nbrs_gex.shape[1]
gex_counts = Counter(nbrs_gex.flatten())
gex_indegree_bias = np.array( [ gex_counts[x]/expected_indegree
for x in range(num_clones) ] )
print(f'gex_indegree_bias: nbr_frac= {nbr_frac:.4f}',
stats.describe(gex_indegree_bias))
tcr_counts = Counter(nbrs_tcr.flatten())
tcr_indegree_bias = np.array( [ tcr_counts[x]/expected_indegree
for x in range(num_clones) ] )
print(f'tcr_indegree_bias: nbr_frac= {nbr_frac:.4f}',
stats.describe(tcr_indegree_bias))
# any correlation?
# if there is strong correlation, this could skew the
# graph-vs-graph analysis
print(f'indegree_bias_correlation: nbr_frac= {nbr_frac:.4f}',
stats.linregress(gex_indegree_bias, tcr_indegree_bias))
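# A hypothetical toy illustration of the in-degree bias computed above (not part
# of the original module): with three clonotypes whose single neighbors are
# [[2], [2], [1]], expected_indegree is 1 and the bias vector is [0, 1, 2];
# a neighbor cycle such as [[1], [2], [0]] gives the unbiased vector [1, 1, 1].
#   nbrs = np.array([[2], [2], [1]])
#   counts = Counter(nbrs.flatten())
#   bias = np.array([counts[x] / nbrs.shape[1] for x in range(3)])  # -> [0., 1., 2.]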
def _make_csr_nbrs(nbrs):
row = []
for i, inbrs in enumerate(nbrs):
row.extend([i]*len(inbrs))
col = np.hstack(nbrs)
assert len(row) == len(col)
data = np.full((len(col),), 1)
return sps.csr_matrix((data, (row, col)), shape=(len(nbrs), len(nbrs)))
def _compute_nbr_overlap_slow(gex_nbrs, tcr_nbrs):
overlap = 0
for g_nbrs, t_nbrs in zip(gex_nbrs, tcr_nbrs):
g_nbrs_set = frozenset(g_nbrs)
overlap += sum(x in g_nbrs_set for x in t_nbrs)
return overlap
def _compute_graph_overlap_stats(
gex_nbrs,
tcr_nbrs,
num_random_repeats,
verbose=False,
swaptags=False,
max_calculation_time=2000,# in seconds
):
''' Helper function for graph-graph overlap summary analysis
see compute_graph_vs_graph_stats(...) function below
'''
starttime = time.time()
gtag,ttag = 'gex','tcr'
if swaptags:
gtag, ttag = ttag, gtag
N = len(gex_nbrs)
assert N == len(tcr_nbrs)
## if this will take too long, we may just estimate things
gex_edges = sum(len(x) for x in gex_nbrs)
tcr_edges = sum(len(x) for x in tcr_nbrs)
expected_overlap = (sum(len(x)*len(y) for x,y in zip(gex_nbrs, tcr_nbrs))/
(N-1))
# compute the bias in the number of incoming edges in the gex graph
expected_indegree = gex_edges/N
gex_counts = Counter()
for nbrs in gex_nbrs:
gex_counts.update(nbrs)
gex_indegree_bias = np.array([gex_counts[x]/expected_indegree
for x in range(N)])
gex_indegree_bias_stats = stats.describe(gex_indegree_bias)
if verbose:
print('gex_indegree_bias:', gex_indegree_bias_stats)
# compute the bias in the number of incoming edges in the tcr graph
expected_indegree = tcr_edges/N
tcr_counts = Counter()
for nbrs in tcr_nbrs:
tcr_counts.update(nbrs)
tcr_indegree_bias = np.array([tcr_counts[x]/expected_indegree
for x in range(N)])
tcr_indegree_bias_stats = stats.describe(tcr_indegree_bias)
if verbose:
print('tcr_indegree_bias:', tcr_indegree_bias_stats)
indegree_correlation = linregress(gex_indegree_bias, tcr_indegree_bias)
# this is a little silly: it's basically C*gex_edges * tcr_edges/nodes**2
# from smf.ols(f'log10_calculation_time ~ log10_nodes + log10_gex_edges + log10_tcr_edges'
# Intercept -4.453273
# log10_nodes -1.914652
# log10_gex_edges 0.958948
# log10_tcr_edges 1.044386
# this was fitted with num_random_repeats = 100
estimated_log10_calculation_time = (-1.9147 * np.log10(N)
+0.9589 * np.log10(gex_edges)
+1.0444 * np.log10(tcr_edges)
-4.4533)
estimated_calculation_time = (10**estimated_log10_calculation_time*
num_random_repeats/100.)
if estimated_calculation_time <= max_calculation_time:
M0 = _make_csr_nbrs(gex_nbrs)
M1 = _make_csr_nbrs(tcr_nbrs).tocoo()
M2 = M1.copy()
overlaps = []
for r in range(num_random_repeats+1):
if r:
p = np.random.permutation(N)
else:
p = np.arange(N)
M1.row = p[M2.row]
M1.col = p[M2.col]
overlap = M0.multiply(M1.tocsr()).sum()
if verbose and r%10==0:
print(f'{r:2d} {overlap:6d}')
overlaps.append(overlap)
o0 = overlaps[0]
m,s = np.mean(overlaps[1:]), np.std(overlaps[1:])
zscore_source = 'shuffling'
else:
zscore_source = 'fitting'
o0 = _compute_nbr_overlap_slow(gex_nbrs, tcr_nbrs)
## params for log10_s determined with
## statsmodels.formula.api.ols(f'log10_overlap_sdev ~
## log10_expected_overlap + total_log10_indegree_variance,...)
# Intercept -0.340085
# log10_expected_overlap 0.691433
# total_log10_indegree_variance 0.253497
total_log10_indegree_variance = (
np.log10(gex_indegree_bias_stats.variance)+
np.log10(tcr_indegree_bias_stats.variance))
log10_s_fitted = (0.691433 * np.log10(expected_overlap)
+0.253497 * total_log10_indegree_variance
-0.340085)
s_fitted = 10**log10_s_fitted
if zscore_source=='fitting':
s = s_fitted
m = expected_overlap
z_fitted = (o0-m)/s
z = z_fitted
else:
z = (o0-m)/s
z_fitted = (o0-expected_overlap)/s_fitted
total_seconds = time.time() - starttime
return {
'overlap':o0,
'expected_overlap':expected_overlap,
'overlap_mean':m,
'overlap_sdev':s,
'overlap_zscore':z,
'overlap_zscore_fitted':z_fitted,
'overlap_zscore_source':zscore_source,
'nodes':N,
'calculation_time':total_seconds,
'calculation_time_fitted':10**estimated_log10_calculation_time,
f'{gtag}_edges':gex_edges,
f'{ttag}_edges':tcr_edges,
f'{gtag}_indegree_variance':gex_indegree_bias_stats.variance,
f'{gtag}_indegree_skewness':gex_indegree_bias_stats.skewness,
f'{gtag}_indegree_kurtosis':gex_indegree_bias_stats.kurtosis,
f'{ttag}_indegree_variance':tcr_indegree_bias_stats.variance,
f'{ttag}_indegree_skewness':tcr_indegree_bias_stats.skewness,
f'{ttag}_indegree_kurtosis':tcr_indegree_bias_stats.kurtosis,
'indegree_correlation_R':indegree_correlation.rvalue,
'indegree_correlation_P':indegree_correlation.pvalue,
}
# def _compute_graph_overlap_stats_old(
# gex_nbrs_array,
# tcr_nbrs,
# num_random_repeats,
# verbose = True
# ):
# '''
# I'm not sure about memory/compute efficiency here, for big datasets
# '''
# N = len(gex_nbrs_array)
# gex_nbrs = [frozenset(x) for x in gex_nbrs_array]
# overlaps = []
# for r in range(num_random_repeats+1):
# if r:
# tcr_shuffle = np.random.permutation(N)
# else:
# tcr_shuffle = np.arange(N)
# overlap = _compute_graph_overlap(gex_nbrs, tcr_nbrs, tcr_shuffle)
# if verbose and r%10==0:
# print(f'compute_graph_overlap_stats: rep {r:2d} {overlap:6d}')
# overlaps.append(overlap)
# o0 = overlaps[0]
# m,s = np.mean(overlaps[1:]), np.std(overlaps[1:])
# z = (o0-m)/s
# gex_edges = sum(len(x) for x in gex_nbrs)
# tcr_edges = sum(len(x) for x in tcr_nbrs)
# return {
# 'overlap':o0,
# 'mean':m,
# 'sdev':s,
# 'zscore':z,
# 'nodes':N,
# 'gex_edges':gex_edges,
# 'tcr_edges':tcr_edges,
# }
def compute_graph_vs_graph_stats(
adata,
all_nbrs,
num_random_repeats = 100,
outfile_prefix = None,
):
'''Here we are assessing overall graph-vs-graph correlation by looking at
the shared edges between TCR and GEX neighbor graphs and comparing
that observed number to the number we would expect if the graphs were
completely uncorrelated. Our null model for uncorrelated graphs is to
take the vertices of one graph and randomly renumber them (permute their
labels). We compare the observed overlap to that expected under this null
model by computing a Z-score, either by permuting one of the graph's
vertices many times to get a mean and standard deviation of the overlap
distribution, or, for large graphs where this is time consuming,
by using a regression model for the
standard deviation.
This is different from graph-vs-graph analysis which looks at graph
overlap on a node-by-node basis
returns a pandas dataframe with the results, and also stores them in
adata.uns['conga_results'][conga.tags.GRAPH_VS_GRAPH_STATS]
'''
num_clones = adata.shape[0]
agroups, bgroups = preprocess.setup_tcr_groups(adata)
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
gex_cluster_nbrs = []
tcr_cluster_nbrs = []
for i in range(num_clones):
ag, bg = agroups[i], bgroups[i]
gc, tc = clusters_gex[i], clusters_tcr[i]
gex_cluster_nbrs.append(
np.nonzero((clusters_gex==gc)&(agroups!=ag)&(bgroups!=bg))[0])
tcr_cluster_nbrs.append(
np.nonzero((clusters_tcr==tc)&(agroups!=ag)&(bgroups!=bg))[0])
dfl = []
for nbr_frac in all_nbrs:
gex_nbrs, tcr_nbrs = all_nbrs[nbr_frac]
stats = _compute_graph_overlap_stats(
gex_nbrs, tcr_nbrs, num_random_repeats)
stats['nbr_frac'] = nbr_frac
stats['graph_overlap_type'] = 'gex_nbr_vs_tcr_nbr'
dfl.append(stats)
stats = _compute_graph_overlap_stats(
gex_nbrs, tcr_cluster_nbrs, num_random_repeats)
stats['nbr_frac'] = nbr_frac
stats['graph_overlap_type'] = 'gex_nbr_vs_tcr_cluster'
dfl.append(stats)
stats = _compute_graph_overlap_stats(
tcr_nbrs, gex_cluster_nbrs, num_random_repeats, swaptags=True)
stats['nbr_frac'] = nbr_frac
stats['graph_overlap_type'] = 'gex_cluster_vs_tcr_nbr'
dfl.append(stats)
results =
| pd.DataFrame(dfl) | pandas.DataFrame |
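A minimal, hypothetical sketch of the permutation null model described in compute_graph_vs_graph_stats above (not the module's own API): shared edges between two KNN graphs are counted with sparse matrices, one graph's node labels are shuffled repeatedly, and the observed overlap is expressed as a z-score against the shuffled distribution.

import numpy as np
import scipy.sparse as sps

def overlap_zscore(gex_nbrs, tcr_nbrs, n_shuffles=200, seed=0):
    n = len(gex_nbrs)
    def to_csr(nbrs):
        # adjacency matrix with a 1 for every directed neighbor edge
        row = np.repeat(np.arange(n), [len(x) for x in nbrs])
        col = np.hstack(nbrs)
        return sps.csr_matrix((np.ones(len(col)), (row, col)), shape=(n, n))
    A = to_csr(gex_nbrs)
    B = to_csr(tcr_nbrs).tocoo()
    observed = A.multiply(B.tocsr()).sum()          # shared directed edges
    rng = np.random.default_rng(seed)
    shuffled = []
    for _ in range(n_shuffles):
        p = rng.permutation(n)                      # relabel one graph's nodes
        C = sps.coo_matrix((B.data, (p[B.row], p[B.col])), shape=(n, n))
        shuffled.append(A.multiply(C.tocsr()).sum())
    m, s = np.mean(shuffled), np.std(shuffled)
    return (observed - m) / s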
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
_skip_if_numpy_not_friendly()
self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT)
def test_to_timedelta(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result, tslib.iNaT)
result = to_timedelta(['', ''])
self.assert_(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')
expected = to_timedelta([0,10],unit='s')
tm.assert_series_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
def test_to_timedelta_via_apply(self):
_skip_if_numpy_not_friendly()
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
_skip_if_numpy_not_friendly()
# GH4984
# make sure ops return timedeltas
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta(timedelta(seconds=9))
tm.assert_almost_equal(result, expected)
result = td.quantile(.1)
# This properly returned a scalar.
expected = to_timedelta('00:00:02.6')
tm.assert_almost_equal(result, expected)
result = td.median()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta('00:00:08')
tm.assert_almost_equal(result, expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()[0]
expected = to_timedelta('00:01:21')
tm.assert_almost_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
_skip_if_numpy_not_friendly()
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual, timedelta_NaT)
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual, timedelta_NaT)
def test_timedelta_ops_with_missing_values(self):
_skip_if_numpy_not_friendly()
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = s1 + pd.NaT # NaT is datetime, not timedelta
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
|
assert_frame_equal(actual, df2)
|
pandas.util.testing.assert_frame_equal
|
"""Utilities for parsing corpus tsv files into pandas DataFrames."""
import logging
from glob import glob
from pathlib import Path
from typing import Dict, Iterable, Union
import pandas as pd
from tqdm import tqdm
import harmonic_inference.utils.corpus_utils as cu
from harmonic_inference.data.corpus_constants import (
CHORD_ONSET_BEAT,
CONVERTERS,
DTYPES,
MEASURE_OFFSET,
NOTE_ONSET_BEAT,
)
def read_dump(
file: str,
index_col: Union[int, Iterable] = (0, 1),
converters: Dict = None,
dtypes: Dict = None,
**kwargs,
) -> pd.DataFrame:
"""
Read a corpus tsv file into a pandas DataFrame.
Parameters
----------
file : string
The tsv file to parse.
index_col : int or list(int)
The index (or indices) of column(s) to use as the index. For files.tsv, use 0.
converters : dict
Converters which will be passed to the pandas read_csv function. These will
overwrite/be added to the default list of CONVERTERS.
dtypes : dict
Dtypes which will be passed to the pandas read_csv function. These will
overwrite/be added to the default list of DTYPES.
Returns
-------
df : pd.DataFrame
The pandas DataFrame, parsed from the given tsv file.
"""
conv = CONVERTERS.copy()
types = DTYPES.copy()
if dtypes is not None:
types.update(dtypes)
if converters is not None:
conv.update(converters)
return pd.read_csv(file, sep="\t", index_col=index_col, dtype=types, converters=conv, **kwargs)
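# A minimal usage sketch (the "corpus/" paths below are hypothetical; the real file names
# are the ones used by load_clean_corpus_dfs):
#     files_df = read_dump("corpus/files.tsv", index_col=0)
#     measures_df = read_dump("corpus/measures.tsv")  # default two-level index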
def load_clean_corpus_dfs(dir_path: Union[str, Path], count: int = None):
"""
Return cleaned DataFrames from the corpus data in the given directory. The DataFrames will
be read from files: 'files.tsv', 'measures.tsv', 'chords.tsv', and 'notes.tsv'.
They will undergo the following cleaning procedure:
1. Remove repeats from measures.
2. Drop notes and chords corresponding to removed measures.
3. Drop chords with numeral '@none' or pd.NAN.
4. Add offsets to notes.
5. Merge tied notes.
6. Add offsets to chords.
Parameters
----------
dir_path : str or Path
The path to a directory containing the files: 'files.tsv', 'measures.tsv', 'chords.tsv',
and 'notes.tsv'.
count : int
If given, the number of pieces to read in. Else, read all of them.
Returns
-------
files_df : pd.DataFrame
The files data frame.
measures_df : pd.DataFrame
The measures data frame with repeats removed.
chords_df : pd.DataFrame
The chords data frame, cleaned as described.
notes_df : pd.DataFrame
The notes data frame, cleaned as described.
"""
files_df = read_dump(Path(dir_path, "files.tsv"), index_col=0)
measures_df = read_dump(Path(dir_path, "measures.tsv"))
notes_df = read_dump(Path(dir_path, "notes.tsv"))
try:
chords_df = read_dump(Path(dir_path, "chords.tsv"), low_memory=False)
except Exception:
# Enable loading with no annotations
chords_df = None
if count is not None:
files_df = files_df.iloc[:count]
measures_df = measures_df.loc[files_df.index]
notes_df = notes_df.loc[files_df.index]
if chords_df is not None:
chords_df = chords_df.loc[files_df.index]
# Bugfix for Couperin piece "next" error
files_df = files_df.loc[~(files_df["file_name"] == "c11n08_Rondeau.tsv")]
measures_df = measures_df.loc[files_df.index]
notes_df = notes_df.loc[files_df.index]
if chords_df is not None:
chords_df = chords_df.loc[chords_df.index.get_level_values(0).isin(files_df.index)]
# End bugfix
# Incomplete column renaming
if "offset" in measures_df.columns:
measures_df[MEASURE_OFFSET].fillna(measures_df["offset"], inplace=True)
measures_df = measures_df.drop("offset", axis=1)
if chords_df is not None:
if "onset" in chords_df.columns:
chords_df[CHORD_ONSET_BEAT].fillna(chords_df["onset"], inplace=True)
chords_df = chords_df.drop("onset", axis=1)
if "onset" in notes_df.columns:
notes_df[NOTE_ONSET_BEAT].fillna(notes_df["onset"], inplace=True)
notes_df = notes_df.drop("onset", axis=1)
# Remove measure repeats
if isinstance(measures_df.iloc[0].next, tuple):
measures_df = cu.remove_repeats(measures_df)
# Remove unmatched
notes_df = cu.remove_unmatched(notes_df, measures_df)
if chords_df is not None:
chords_df = cu.remove_unmatched(chords_df, measures_df)
chords_df = chords_df.drop(
chords_df.loc[(chords_df.numeral == "@none") | chords_df.numeral.isnull()].index
)
# Remove notes with invalid onset times
note_measures = pd.merge(
notes_df.reset_index(),
measures_df.reset_index(),
how="left",
on=["file_id", "mc"],
)
valid_onsets = (note_measures[MEASURE_OFFSET] <= note_measures[NOTE_ONSET_BEAT]) & (
note_measures[NOTE_ONSET_BEAT] < note_measures["act_dur"] + note_measures[MEASURE_OFFSET]
)
if not valid_onsets.all():
with pd.option_context("display.max_rows", None, "display.max_columns", None):
invalid_string = note_measures.loc[
~valid_onsets,
["file_id", "note_id", "mc", NOTE_ONSET_BEAT, MEASURE_OFFSET, "act_dur"],
]
logging.debug(
f"{(~valid_onsets).sum()} notes have invalid onset times:\n{invalid_string}"
)
notes_df = notes_df.loc[valid_onsets.values]
# Remove chords with invalid onset times
if chords_df is not None:
chord_measures = pd.merge(
chords_df.reset_index(),
measures_df.reset_index(),
how="left",
on=["file_id", "mc"],
)
valid_onsets = (chord_measures[MEASURE_OFFSET] <= chord_measures[CHORD_ONSET_BEAT]) & (
chord_measures[CHORD_ONSET_BEAT]
< chord_measures["act_dur"] + chord_measures[MEASURE_OFFSET]
)
if not valid_onsets.all():
with pd.option_context("display.max_rows", None, "display.max_columns", None):
invalid_string = chord_measures.loc[
~valid_onsets,
["file_id", "chord_id", "mc", CHORD_ONSET_BEAT, MEASURE_OFFSET, "act_dur"],
]
logging.debug(
f"{(~valid_onsets).sum()} chords have invalid onset times:\n{invalid_string}"
)
chords_df = chords_df.loc[valid_onsets.values]
# Add offsets
if not all([column in notes_df.columns for column in ["offset_beat", "offset_mc"]]):
notes_df = cu.add_note_offsets(notes_df, measures_df)
# Merge ties
notes_df = cu.merge_ties(notes_df)
# Add chord metrical info
if chords_df is not None:
chords_df = cu.add_chord_metrical_data(chords_df, measures_df)
# Remove chords with dur 0
if chords_df is not None:
invalid_dur = chords_df["duration"] <= 0
if invalid_dur.any():
with
|
pd.option_context("display.max_rows", None, "display.max_columns", None)
|
pandas.option_context
|
import numpy as np
import pandas as pd
import datetime
from scipy.signal import find_peaks, peak_prominences
from scipy.interpolate import interp1d
from scipy import signal
from scipy.integrate import trapz
'''
Feature Engineering of Wearable Sensors:
Metrics computed:
Mean Heart Rate Variability
Median Heart Rate Variability
Maximum Heart Rate Variability
Minimum Heart Rate Variability
SDNN (HRV)
RMSSD (HRV)
NNx (HRV)
pNNx (HRV)
HRV Frequency Domain Metrics:
PowerVLF
PowerLF
PowerHF
PowerTotal
LF/HF
PeakVLF
PeakLF
PeakHF
FractionLF
FractionHF
EDA Peaks
Activity Bouts
Interday Summary:
Interday Mean
Interday Median
Interday Maximum
Interday Minimum
Interday Quartile 1
Interday Quartile 3
Interday Standard Deviation
Interday Coefficient of Variation
Intraday Standard Deviation (mean, median, standard deviation)
Intraday Coefficient of Variation (mean, median, standard deviation)
Intraday Mean (mean, median, standard deviation)
Daily Mean
Intraday Summary:
Intraday Mean
Intraday Median
Intraday Minimum
Intraday Maximum
Intraday Quartile 1
Intraday Quartile 3
TIR (Time in Range of default 1 SD)
TOR (Time outside Range of default 1 SD)
POR (Percent outside Range of default 1 SD)
MASE (Mean Amplitude of Sensor Excursions, default 1 SD)
Hours from Midnight (circadian rhythm feature)
Minutes from Midnight (circadian rhythm feature)
'''
def e4import(filepath, sensortype, Starttime='NaN', Endtime='NaN', window='5min'): #window is a pandas resample rule string, e.g. '5min'
"""
brings in an empatica compiled file **this is not raw empatica data**
Args:
filepath (String): path to file
sensortype (String): Options: 'EDA', 'HR', 'ACC', 'TEMP', 'BVP'
Starttime (String): (optional, default arg = 'NaN') format '%Y-%m-%d %H:%M:%S.%f', if you want to only look at data after a specific time
Endtime (String): (optional, default arg = 'NaN') format '%Y-%m-%d %H:%M:%S.%f', if you want to only look at data before a specific time
window (String): default '5min'; this is the window your data will be resampled on.
Returns:
(pd.DataFrame): dataframe of data with Time, Mean, Std columns
"""
if sensortype == 'ACC':
data = pd.read_csv(filepath,header=None, names = ["Time", "x", "y", "z"])
data['Var'] = np.sqrt(data['x']**2 + data['y']**2 + data['z']**2)
data = data.drop(columns=['x', 'y', 'z'])
else:
data = pd.read_csv(filepath, header=None, names=['Time', 'Var'])
data['Time'] = pd.to_datetime(data['Time'], format='%Y-%m-%d %H:%M:%S.%f')
if Starttime != 'NaN':
VarData = data.loc[data.loc[:, 'Time'] >= Starttime, :]
if Endtime != 'NaN':
VarData = VarData.loc[VarData.loc[:, 'Time'] <= Endtime, :]
else:
VarData = data
Data = pd.DataFrame()
Data[[(sensortype + '_Mean')]] = VarData.resample(window, on='Time').mean()
Data[[(sensortype + '_Std')]] = VarData.resample(window, on='Time').std()
Data = Data.reset_index()
print((sensortype + ' Import and Resample Complete'))
return(Data)
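# Usage sketch (hypothetical file path; times follow the '%Y-%m-%d %H:%M:%S.%f' format expected above):
#     eda = e4import('data/EDA_compiled.csv', 'EDA',
#                    Starttime='2020-01-01 00:00:00.000000',
#                    Endtime='2020-01-02 00:00:00.000000', window='5min')
#     # -> DataFrame with columns Time, EDA_Mean, EDA_Std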
def HRV(time, IBI, ibimultiplier = 1000):
"""
computes Heart Rate Variability metrics
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
maxHRV (FloatType): maximum HRV
minHRV (FloatType): minimum HRV
meanHRV (FloatType): mean HRV
medianHRV(FloatType): median HRV
"""
time = time
ibi = IBI*ibimultiplier
maxHRV = round(max(ibi) * 10) / 10
minHRV = round(min(ibi) * 10) / 10
meanHRV = round(np.mean(ibi) * 10) / 10
medianHRV = round(np.median(ibi) * 10) / 10
return maxHRV, minHRV, meanHRV, medianHRV
def SDNN(time, IBI, ibimultiplier=1000):
"""
computes Heart Rate Variability metric SDNN
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
SDNN (FloatType): standard deviation of NN intervals
"""
time = time
ibi = IBI*ibimultiplier
SDNN = round(np.sqrt(np.var(ibi, ddof=1)) * 10) / 10
return SDNN
def RMSSD(time, IBI, ibimultiplier=1000):
"""
computes Heart Rate Variability metric RMSSD
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
RMSSD (FloatType): root mean square of successive differences
"""
time = time
ibi = IBI*ibimultiplier
differences = abs(np.diff(ibi))
rmssd = np.sqrt(np.sum(np.square(differences)) / len(differences))
return round(rmssd * 10) / 10
def NNx(time, IBI, ibimultiplier=1000, x=50):
"""
computes Heart Rate Variability metrics NNx and pNNx
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
x (IntegerType): default = 50; threshold in ms that successive heartbeat interval differences must exceed to be counted
Returns:
NNx (FloatType): the number of times successive heartbeat intervals exceed x ms
pNNx (FloatType): the proportion of NNx divided by the total number of NN (R-R) intervals.
"""
time = time
ibi = IBI*ibimultiplier
differences = abs(np.diff(ibi))
n = np.sum(differences > x)
p = (n / len(differences)) * 100
return (round(n * 10) / 10), (round(p * 10) / 10)
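# Worked example (illustrative numbers only): with the default threshold x=50 ms, successive
# IBI differences of [30, 60, 20, 80] ms give NNx = 2 and pNNx = 2/4 * 100 = 50.0.
#     nn50, pnn50 = NNx(df['Time'], df['IBI'])  # df is a hypothetical DataFrame with IBI in seconds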
def FrequencyHRV(IBI, ibimultiplier=1000, fs=1):
"""
computes Heart Rate Variability frequency domain metrics
Args:
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
fs (IntegerType): Optional sampling frequency for frequency interpolation (default=1)
Returns:
(dictionary): dictionary of frequency domain HRV metrics with keys:
PowerVLF (FloatType): Power of the Very Low Frequency (VLF): 0-0.04Hz band
PowerLF (FloatType): Power of the Low Frequency (LF): 0.04-0.15Hz band
PowerHF (FloatType): Power of the High Frequency (HF): 0.15-0.4Hz band
PowerTotal (FloatType):Total power over all frequency bands
LF/HF (FloatType): Ratio of low and high power
Peak VLF (FloatType): Peak of the Very Low Frequency (VLF): 0-0.04Hz band
Peak LF (FloatType): Peak of the Low Frequency (LF): 0.04-0.15Hz band
Peak HF (FloatType): Peak of the High Frequency (HF): 0.15-0.4Hz band
FractionLF (FloatType): Fraction that is low frequency
FractionHF (FloatType): Fraction that is high frequency
"""
ibi = IBI*ibimultiplier
steps = 1 / fs
# create interpolation function based on the rr-samples.
x = np.cumsum(ibi) / 1000.0
f = interp1d(x, ibi, kind='cubic')
# sample from interpolation function
xx = np.arange(1, np.max(x), steps)
ibi_interpolated = f(xx)
fxx, pxx = signal.welch(x=ibi_interpolated, fs=fs)
'''
Segment found frequencies in the bands
- Very Low Frequency (VLF): 0-0.04Hz
- Low Frequency (LF): 0.04-0.15Hz
- High Frequency (HF): 0.15-0.4Hz
'''
cond_vlf = (fxx >= 0) & (fxx < 0.04)
cond_lf = (fxx >= 0.04) & (fxx < 0.15)
cond_hf = (fxx >= 0.15) & (fxx < 0.4)
# calculate power in each band by integrating the spectral density
vlf = trapz(pxx[cond_vlf], fxx[cond_vlf])
lf = trapz(pxx[cond_lf], fxx[cond_lf])
hf = trapz(pxx[cond_hf], fxx[cond_hf])
# sum these up to get total power
total_power = vlf + lf + hf
# find which frequency has the most power in each band
peak_vlf = fxx[cond_vlf][np.argmax(pxx[cond_vlf])]
peak_lf = fxx[cond_lf][np.argmax(pxx[cond_lf])]
peak_hf = fxx[cond_hf][np.argmax(pxx[cond_hf])]
# fraction of lf and hf
lf_nu = 100 * lf / (lf + hf)
hf_nu = 100 * hf / (lf + hf)
results = {}
results['PowerVLF'] = round(vlf, 2)
results['PowerLF'] = round(lf, 2)
results['PowerHF'] = round(hf, 2)
results['PowerTotal'] = round(total_power, 2)
results['LF/HF'] = round(lf / hf, 2)
results['PeakVLF'] = round(peak_vlf, 2)
results['PeakLF'] = round(peak_lf, 2)
results['PeakHF'] = round(peak_hf, 2)
results['FractionLF'] = round(lf_nu, 2)
results['FractionHF'] = round(hf_nu, 2)
return results
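# Usage sketch (hypothetical column name; IBI assumed to be in seconds so the default
# ibimultiplier=1000 converts it to ms before the Welch estimate):
#     freq = FrequencyHRV(df['IBI'])
#     lf_hf = freq['LF/HF']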
def PeaksEDA(eda, time):
"""
calculates peaks in the EDA signal
Args:
eda (pandas.DataFrame column or pandas series): eda column
time (pandas.DataFrame column or pandas series): time column
Returns:
countpeaks (IntegerType): the number of peaks total
peakdf (pandas.DataFrame): a pandas dataframe with time and peaks to easily integrate with your data workflow
"""
EDAy = eda.to_numpy()
EDAx = time.to_numpy()
peaks, _ = find_peaks(EDAy, height=0, distance=4, prominence=0.3)
peaks_x = []
for i in peaks:
px = time.iloc[i]
peaks_x.append(px)
peakdf = pd.DataFrame()
peakdf['Time'] = peaks_x
peakdf['Peak'] = ([1]*len(peaks_x))
countpeaks = len(peakdf)
return countpeaks, peakdf
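# Usage sketch (hypothetical column names); the peak criteria (height=0, distance=4,
# prominence=0.3) are fixed inside the function:
#     n_peaks, peak_df = PeaksEDA(df['EDA'], df['Time'])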
def exercisepts(acc, hr, time): #acc and hr must be same length, acc must be magnitude
"""
calculates activity bouts using accelerometry and heart rate
Args:
acc (pandas.DataFrame column or pandas series): accelerometry column
hr (pandas.DataFrame column or pandas series): heart rate column
time (pandas.DataFrame column or pandas series): time column
Returns:
countbouts (IntegerType): the number of activity bouts total
returndf (pandas.DataFrame): a pandas dataframe with time and activity bouts (designated as a '1') to easily integrate with your data workflow
"""
exercisepoints = []
for z in range(len(acc)):
if acc[z] > np.mean(acc[0:z]):
if hr[z] > np.mean(hr[0:z]):
exercisepoints.append(1)
else:
exercisepoints.append(0)
else:
exercisepoints.append(0)
returndf = pd.DataFrame()
returndf['Time'] = time
returndf['Activity Bouts'] = exercisepoints
countbouts = len(exercisepoints)
return countbouts, returndf
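# Usage sketch (hypothetical column names; acc must already be a magnitude, e.g. the ACC_Mean
# column produced by e4import with sensortype='ACC'):
#     n_bouts, bouts_df = exercisepts(df['ACC_Mean'], df['HR_Mean'], df['Time'])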
def interdaycv(column):
"""
computes the interday coefficient of variation on pandas dataframe Sensor column
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
Returns:
cvx (IntegerType): interday coefficient of variation
"""
cvx = (np.std(column) / (np.nanmean(column)))*100
return cvx
def interdaysd(column):
"""
computes the interday standard deviation of pandas dataframe Sensor column
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
Returns:
interdaysd (IntegerType): interday standard deviation
"""
interdaysd = np.std(column)
return interdaysd
def intradaycv(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaycv, returns the mean, median, and sd of intraday cv Sensor column in pandas dataframe
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
intradaycv_mean (IntegerType): Mean of intraday coefficient of variation
intradaycv_median (IntegerType): Median of intraday coefficient of variation
intradaycv_sd (IntegerType): SD of intraday coefficient of variation
Requires:
interdaycv() function
"""
intradaycv = []
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaycv.append(interdaycv(df[df['Day']==i]['Column']))
intradaycv_mean = np.mean(intradaycv)
intradaycv_median = np.median(intradaycv)
intradaycv_sd = np.std(intradaycv)
return intradaycv_mean, intradaycv_median, intradaycv_sd
def intradaysd(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaysd, returns the mean, median, and sd of intraday sd Sensor column in pandas dataframe
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
intradaysd_mean (IntegerType): Mean of intraday standard deviation
intradaysd_median (IntegerType): Median of intraday standard deviation
intradaysd_sd (IntegerType): SD of intraday standard deviation
"""
intradaysd =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaysd.append(np.std(df[df['Day']==i]['Column']))
intradaysd_mean = np.mean(intradaysd)
intradaysd_median = np.median(intradaysd)
intradaysd_sd = np.std(intradaysd)
return intradaysd_mean, intradaysd_median, intradaysd_sd
def intradaymean(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaymean, returns the mean, median, and sd of the intraday mean of the Sensor data
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
intradaymean_mean (IntegerType): Mean of the intraday means of the sensor data
intradaymean_median (IntegerType): Median of the intraday means of the sensor data
intradaymean_sd (IntegerType): SD of the intraday means of the sensor data
"""
intradaymean =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaymean.append(np.nanmean(df[df['Day']==i]['Column']))
intradaymean_mean = np.mean(intradaymean)
intradaymean_median = np.median(intradaymean)
intradaymean_sd = np.std(intradaymean)
return intradaymean_mean, intradaymean_median, intradaymean_sd
def dailymean(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the mean of each day
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
pandas.DataFrame with days and means as columns
"""
intradaymean =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaymean.append(np.nanmean(df[df['Day']==i]['Column']))
dailymeandf = pd.DataFrame()
dailymeandf['Day'] = pd.unique(df['Day'])
dailymeandf['Mean'] = intradaymean
return dailymeandf
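# Usage sketch for the intraday/daily helpers (hypothetical column names):
#     mean_of_means, median_of_means, sd_of_means = intradaymean(df['HR_Mean'], df['Time'])
#     daily = dailymean(df['HR_Mean'], df['Time'])  # DataFrame with Day and Mean columns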
def dailysummary(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the summary of each day (mean, median, std, max, min, Q1, Q3)
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
pandas.DataFrame with days and summary metrics as columns
"""
intradaymean =[]
intradaymedian =[]
intradaysd =[]
intradaymin =[]
intradaymax =[]
intradayQ1 =[]
intradayQ3 =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in
|
pd.unique(df['Day'])
|
pandas.unique
|
"""!
All functions providing plotting functionalities.
"""
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import matplotlib.image as image
import pandas as pd
import re
import argparse
import datetime as dt
import numpy as np
from pandas.plotting import register_matplotlib_converters
from datetime import datetime
register_matplotlib_converters()
plt.rcParams.update({'font.size': 22})
environment_sensor_pattern = re.compile(r"([0-9-]+)\s([0-9:.]+):\stemperature:\s([0-9.]+),\sgas:\s([0-9]+),\shumidity:\s([0-9.]+),\spressure:\s([0-9.]+),\saltitude:\s([0-9.]+)", re.MULTILINE)
soil_moisture_pattern = re.compile(r"([0-9-]+)\s([0-9.:]+):\s\[([0-9]+),\s([0-9.]+),\s([0-9.]+)\]", re.MULTILINE)
def plot_soil_moisture(dict, past24):
"""!
Plots soil moisture data in simple line chart
@param dict: Dictionary containing timestamps and associated readings.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 300, 0, zorder=3, alpha=0.2)
ax.xaxis.set_minor_locator(hours3)
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.grid()
plt.xlabel("Day - Hour")
plt.ylabel("Moisture Percentage (%)")
plt.title("Soil Moisture % vs Time")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title("Soil Moisture % Past 24 Hrs")
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Moisture_vs_Time_24H.png', dpi=500)
plt.savefig('Moisture_vs_Time.png', dpi=500)
# plt.show()
def plot_temperature(dict, past24):
"""!
Plots temperature data in simple line chart
@param dict: Dictionary containing timestamps and associated readings.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 650, 0, zorder=3, alpha=0.2)
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_minor_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.grid()
plt.title("Temperature Over Time")
plt.xlabel("Time (Month-Day Hour)")
plt.ylabel("Temperature (°C)")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title('Temperature Past 24 Hrs')
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Temperature_vs_Time_24H.png', dpi=500)
plt.savefig('Temperature_vs_Time.png', dpi=500)
# plt.show()
def boxplot_environment(df):
"""!
Creates a boxplot of all the relevant environment sensor data.
What is a boxplot?
Text from https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html:
The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2).
The whiskers extend from the edges of box to show the range of the data.
The position of the whiskers is set by default to 1.5 * IQR (IQR = Q3 - Q1) from the edges of the box.
Outlier points are those past the end of the whiskers.
@param df: dataframe object from which we generate a boxplot.
"""
df['VOC'] = df['VOC'].div(1000)
# with plt.style.context("seaborn"):
fig, ax = plt.subplots(1, 3)
fig.suptitle('Environment Sensor Data')
df.boxplot('Temperature', ax=ax[0])
df.boxplot('VOC', ax=ax[1], fontsize=12)
df.boxplot('Humidity', ax=ax[2])
ax[0].set_ylabel("Temperature (°C)")
ax[1].set_ylabel("VOC (kΩ)")
ax[2].set_ylabel("Humidity (%)")
plt.subplots_adjust(top=0.95)
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
plt.savefig('Environment_Boxplot.png', dpi=500)
# plt.show()
def extract_data_from_log(data, pattern):
"""!
Function for extracting data out of a log file using regex matching.
Returns all regex match objects.
@param data: Raw data from the log file.
@param pattern: Regex pattern to use for matching.
"""
matches = list()
for line in data:
matches.append(re.match(pattern, line))
return matches
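# Usage sketch with one hypothetical soil-moisture log line (format assumed from soil_moisture_pattern):
#     line = "2020-03-01 12:00:00.000000: [512, 1.65, 43.2]"
#     matches = extract_data_from_log([line], soil_moisture_pattern)
#     # matches[0].group(1) -> date, group(2) -> time, group(5) -> moisture percentage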
def generate_plots(root="./logs/", soil_sensor_log="soil_moisture_sensor_1.txt", environment_sensor_log="environment_sensor.txt"):
# Plot soil moisture data
with open(root+soil_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, soil_moisture_pattern)
data_dict = dict()
for match in matches:
# current_val = float(match.group(4)) # Raw voltage reading
current_val = float(match.group(5)) # Percentage reading
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict[index_dt] = current_val
plot_soil_moisture(data_dict, True)
plot_soil_moisture(data_dict, False)
# Plot temperature data
with open(root+environment_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, environment_sensor_pattern)
data_dict = dict()
temperature_dict = dict()
data_dict['Temperature'] = {}
data_dict['VOC'] = {}
data_dict['Humidity'] = {}
for match in matches:
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict['Temperature'][index_dt] = float(match.group(3))
data_dict['VOC'][index_dt] = float(match.group(4))
data_dict['Humidity'][index_dt] = float(match.group(5))
plot_temperature(data_dict['Temperature'], True)
plot_temperature(data_dict['Temperature'], False)
# Plot environment sensor data
df =
|
pd.DataFrame.from_dict(data_dict, orient='columns')
|
pandas.DataFrame.from_dict
|
"""
Created on 2019-12-20
@author: <NAME> - github.com/rpadrino - IMDEA Networks
"""
#imports
from __future__ import division
import math
#from mod_read_crop_files import *
#from mod_tracker import *
#from mod_bin_classifier import *
from mod_multiclass_classifier import *
import os
from os import listdir
from os.path import isfile, join
import shutil #for moving files
import numpy as np
import pandas as pd
import argparse
import sys
###### USE ########
# import:
# from mod_central_classification import *
#
# Main function (examples):
# centralClassification()
# centralClassification('guess')
# centralClassification('guess', './classification/', True)
# centralClassification(input_path='./classification/')
#
# optional parameters
# cameras_element: to provide the camera element to be processed: top, bottom. Default: guess
# input_path: to provide where the local results (cameras) are. Default: ./classification/
# debug: use for debugging [False by default].
# config. and vars.
input_classification_folder = "./classification/" # the folder will contain the local results and other files from the species classification per each camera in subfolders
###output_classification_folder = "./classification/output/" # the folder will contain the final results and other files for the species classification
output_classification_folder = "output/" # the folder will contain the final results and other files for the species classification
input_classification_csv_file = "camera_classification.csv"
## CSV content: ......
input_classification_path_to_crops_file = "path_crops_folder.txt"
output_classification_best_file = "best_classification.txt"
output_detection_best_file = "best_detection.txt"
#output_classification_boundary_file = "boundary_targets.txt"
#recalculate values - boundary areas
boundary_area_left = int( ( 2048 * 10.5 ) / 70.5 ) #305pixels == 10.5degrees of overlapping
boundary_area_right = 2048 - boundary_area_left
working_in_folder_path = None
working_out_folder_path = None
###### functions ########
def getTimestampFromFilename(strr):
strr_sub = strr.split('frame')[0]
if strr_sub.startswith('IMG_'): #just in case
strr_sub = strr_sub[4:]
if strr_sub.endswith('-'): #just in case
strr_sub = strr_sub[:-1]
if strr_sub.endswith('_'): #just in case
strr_sub = strr_sub[:-1]
return strr_sub
def checkCamerasElementParameter(element): #change name
cam_elements = {
"top": True,
"bottom": True,
"guess": True,
"all": False
}
return cam_elements.get(element, False)
def getIPsubnetByCamerasElement(element):
cam_elements = {
"top": 20,
"bottom": 40
}
return cam_elements.get(element, -1)
def getSpeciesNameByNumber(element):
cam_elements = {
0: "albacore",
1: 'amberjack',
2: 'atl_mackerel',
3: 'dorado',
4: 'med_mackerel',
5: 'swordfish',
6: 'others'
}
return cam_elements.get(element, -1)
#check
def getCameraFolderName(element, ncam):
subnet_name = getIPsubnetByCamerasElement(element)
camera_folder = ''
if subnet_name != -1:
#if subnet_name and (ncam > 0 and ncam < 7):
if subnet_name and (ncam > 0 and ncam < 256):
#./cameraXX.Y/
camera_folder = "camera%d.%d/" % ( subnet_name, ncam )
return camera_folder
def getCameraNamesFolderList(cameras_element):
folder_list = []
if checkCamerasElementParameter(cameras_element):
for ii in range(1,7): #for the number of cameras in each level (cameras_element)
camera_folder = getCameraFolderName( cameras_element, ii )
if camera_folder != '':
folder_list.append( camera_folder )
return folder_list
def checkCamerasElement(cameras_element): #change name
cameras_element_present = False
for camera_folder in getCameraNamesFolderList(cameras_element):
camera_folder_wpath = join( working_in_folder_path, camera_folder)
if os.path.isdir( camera_folder_wpath ):
if isfile( join( camera_folder_wpath, input_classification_csv_file) ):
cameras_element_present = True
break
return cameras_element_present
def getPathToCropsPerCameraFromFile(camera_name):
path_crops_folder_file = join(working_in_folder_path, camera_name, input_classification_path_to_crops_file)
path_crops_folder = open( path_crops_folder_file ).read().replace('\n','').replace('\r','')
return path_crops_folder
def getFilenameWithPath(strr, prefix): #call: df['acb'].apply( getFilenameWithPath, prefix='/path/to' )
return join(prefix, strr)
def getNumberOfDifferentSpecies(df): #count different fishes
#species_list_ids = df['artifact-multi-decision'].unique()
df_speciesid_count = pd.DataFrame( columns=('species', 'count') )
if len(df):
#for species_index in( species_list_ids ):
for ii, species_index in enumerate( df['artifact-multi-decision'].unique() ): #sorted() <-- without enumeration
df_specie = df[ df['artifact-multi-decision'] == species_index ]
number_fishes_per_specie = df_specie['artifact-id'].nunique()
df_speciesid_count.loc[ 0 if pd.isnull( df_speciesid_count.index.max() ) else df_speciesid_count.index.max() + 1 ] = [int(species_index)] + [int(number_fishes_per_specie)]
return df_speciesid_count #df: 'species', 'count'. ex.: 2->24, 3->11,...
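# Illustrative sketch (hypothetical counts): if df holds 24 distinct artifact-ids classified as
# species 2 and 11 classified as species 3, the returned frame contains the rows
# (species=2, count=24) and (species=3, count=11).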
def saveResults(df, folder_path, output_file):
return df.to_csv( join( folder_path, output_file) , index=False, header=False) #,encoding='utf-8'
def saveNumberOfFishesPerSpecies(df_list, folder_path):
for index, row in df_list.iterrows(): ## the column 'species' in not accesible, but it is as indexes
##species_index = row['species']
species_index = index
species_name = getSpeciesNameByNumber( species_index )
number_fishes = row['count']
output_file = None
status = None
try:
#output_file = open( '%s/%s.txt' % (working_out_folder_path, species_name), 'w') ##join()
output_file = open( '%s/%s.txt' % (folder_path, species_name), 'w') ##join()
output_file.write('%d\r\n' % (int(number_fishes) ) )
output_file.close()
if status is None and not status:
status = True
except IOError:
status = False
finally:
if output_file is not None:
output_file.close() #just in case
return status
def getArtifactInBoundaryBasic(df):
artifacts_in_boundary = df[ ( df['x1'] <= boundary_area_left ) | ( df['x2'] >= boundary_area_right ) ]
return artifacts_in_boundary.reset_index(level=0, drop=True).copy()
def getArtifactInBoundary(df):
list_columns = np.array(['filename','x1','y1','x2','y2','artifact-multi-decision','ncam'])
##list_columns_final_order = np.array(['ncam','timestamp','x1','y1','x2','y2','species_name','artifact-multi-decision'])
list_columns_final_order = np.array(['ncam','filename','x1','y1','x2','y2','species_name','artifact-multi-decision'])
artifact_in_boundary = df[ ( df['x1'] <= boundary_area_left ) | ( df['x2'] >= boundary_area_right ) ]
artifact_in_boundary = artifact_in_boundary[list_columns].reset_index(level=0, drop=True).copy()
# add column with species names
artifact_in_boundary['species_name'] = artifact_in_boundary['artifact-multi-decision'].apply( getSpeciesNameByNumber )
# add column with timestamp from filenames
#artifact_in_boundary['timestamp'] = artifact_in_boundary['filename'].apply( getTimestampFromFilename )
#delete column "filename" - not needed
#artifact_in_boundary.drop("filename", axis=1, inplace=True)
return artifact_in_boundary[ list_columns_final_order ]
def processCamerasElement(cameras_element):
if checkCamerasElementParameter(cameras_element):
df = pd.DataFrame() # columns=('filename', 'frame', 'x1', 'y1', 'x2', 'y2', 'detection-acc',)
#filename, frame, x1, y1, x2, y2, detection-acc, bin-prob-neg, bin-prob-pos, multi-prob-1, multi-prob-2, multi-prob-3, multi-prob-4, multi-prob-5, multi-prob-6, artifact-bin-prob-pos, artifact-bin-prob-neg, artifact-bin-decision, artifact-multi-prob-1, artifact-multi-prob-2, artifact-multi-prob-3, artifact-multi-prob-4, artifact-multi-prob-5, artifact-multi-prob-6, artifact-multi-decision
df_wFishesPerSpecie = pd.DataFrame()
folder_counter = 0
for camera_folder in getCameraNamesFolderList(cameras_element):
camera_folder_wpath = join( working_in_folder_path, camera_folder)
ncam = camera_folder.split('.')[-1]
ncam = int( ncam[:-1] ) #remove last '/'
if os.path.isdir( camera_folder_wpath ):
# ncam = camera_folder.split('.')[-1]
# ncam = int( ncam[:-1] ) #remove last '/'
if isfile( join( camera_folder_wpath, input_classification_csv_file) ):
df_cam = pd.read_csv( join( camera_folder_wpath, input_classification_csv_file), header='infer' )
#df_cam['ncam'] = np.array([ ncam ] * len(df) )
df_cam['ncam'] = ncam
##df_cam.reset_index(level=0, drop=True, inplace=True) #not here, after concat
##
df_wFishesPerSpecieAndCam = getNumberOfDifferentSpecies( df_cam ) #df: 'species', 'count'. ex.: 2->24, 3->11,..
if len(df_wFishesPerSpecieAndCam):
df_wFishesPerSpecieAndCam['ncam'] = ncam
df_wFishesPerSpecie = pd.concat([df_wFishesPerSpecie, df_wFishesPerSpecieAndCam], axis = 0)
df_wFishesPerSpecie.reset_index(level=0, drop=True, inplace=True) #df: 'species', 'count'. ex.: 2->24, 3->11,..2->3, 3->....
if len(df):
df = pd.concat([df, df_cam], axis = 0)
df.reset_index(level=0, drop=True, inplace=True)
else:
df = df_cam.copy()
del df_cam
folder_counter += 1
else:
print('CSV from camera %d not Found [%s].' % (ncam, cameras_element) )
##ifp-end-csv-camera-exists
else:
print('CSV from camera %d not Found [%s].' % (ncam, cameras_element) )
## if-end-isdir
## for-end-cameras-read-csv
if len(df): #or ## len(df) ## folder_counter > 0
#NUMBER OF FISHES FROM ALL CAMERAS
# group per species
if len(df_wFishesPerSpecie):
df_wFishesPerSpecieNoCam = df_wFishesPerSpecie.copy()
df_wFishesPerSpecieNoCam.drop("ncam", axis=1, inplace=True)
number_fishes_per_specie = df_wFishesPerSpecieNoCam.groupby(['species']).sum()[['count']] ## , as_index=False
#problem: groupby('species') is removing 'count' column.
#number_fishes_per_specie = df_wFishesPerSpecie.groupby('species').sum()[['count']] ## , as_index=False
#df: 'species', 'count'. ex.: 2->24, 3->11,..2->3, 3->....
#save one file per species with the number.
saving_result_per_species = saveNumberOfFishesPerSpecies( number_fishes_per_specie, working_out_folder_path )
## df: 'species', 'count' (totals)
else:
print('Dataframe with number of fishes per species is empty. Nothing to save.')
print('')
# saveResults( pd.DataFrame() , working_out_folder_path, 'nodata_species.txt')
#BEST DETECTION
#function define in 'mod_multiclass_classifier.py'
#df_bestDetection = getBestArtifact(df) #filename, acc, species
df_bestDetection = getBestArtifactFromSegmentation(df) #filename, acc, species
df_bestDetection['species-name'] = df_bestDetection['species'].apply( getSpeciesNameByNumber )
list_columns_final_order = np.array(['filename','acc','species-name','species'])
#saving_result_best = saveResults( df_bestDetection[ list_columns_final_order ], working_out_folder_path, output_classification_best_file)
saving_result_best = saveResults( df_bestDetection[ list_columns_final_order ], working_out_folder_path, output_detection_best_file)
#copy best file
filename_best_result = df_bestDetection.head(1)['filename'].to_numpy()[0]
camera_best_result = df[ df['filename'] == filename_best_result ].head(1)['ncam']
camera_folder_best_result = getCameraFolderName( cameras_element, camera_best_result.to_numpy()[0] )
filename_best_result_wPath = join(working_in_folder_path ,camera_folder_best_result, filename_best_result)
if isfile( filename_best_result_wPath):
shutil.copy( filename_best_result_wPath , working_out_folder_path)
else:
print("It was not possible to find file for best results: %s" % (filename_best_result_wPath) )
print("")
#BOUNDARY AREAS
artifacts_in_boundary = getArtifactInBoundary( df ) #camera, timestamp, coords, species_name, species_index
#save boundary results
saving_boundary_result = "" ##VS None and +=
for ii, cam_index in enumerate( df['ncam'].unique() ):
camera_name = getCameraFolderName(cameras_element, cam_index)
#saving_boundary_result +=
artifacts_in_boundary_per_cam = artifacts_in_boundary[ artifacts_in_boundary['ncam'] == cam_index ].reset_index(level=0, drop=True).copy()
path_for_filename = getPathToCropsPerCameraFromFile(camera_name)
artifacts_in_boundary_per_cam['filename'] = artifacts_in_boundary_per_cam['filename'].apply( getFilenameWithPath, prefix=path_for_filename )
saveResults( artifacts_in_boundary_per_cam, working_out_folder_path , "%s_boundary.txt" % camera_name[:-1] )
#saveResults( artifacts_in_boundary[ artifacts_in_boundary['ncam'] == cam_index ], working_out_folder_path , "%s_boundary.txt" % camera_name[:-1] )
#FILE: cameraXX.Y_boundary.txt
#FORMAT: camera, timestamp, coords, species, species_index (each detection one line)
## for-end
statusCameraElement = True
else:
print('Input CSVs are empty. Nothing to process.')
print('')
saveResults(
|
pd.DataFrame()
|
pandas.DataFrame
|
from unittest.mock import patch
import featuretools as ft
import pandas as pd
import pytest
import woodwork as ww
from featuretools.feature_base import IdentityFeature
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from evalml.demos import load_diabetes
from evalml.pipelines.components import DFSTransformer
def test_index_errors(X_y_binary):
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=0)
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=None)
def test_numeric_columns(X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
feature = DFSTransformer()
feature.fit(X_pd, y)
feature.transform(X_pd)
@patch("evalml.pipelines.components.transformers.preprocessing.featuretools.dfs")
@patch(
"evalml.pipelines.components.transformers.preprocessing.featuretools.calculate_feature_matrix"
)
def test_featuretools_index(mock_calculate_feature_matrix, mock_dfs, X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
X_new_index = X_pd.copy()
index = [i for i in range(len(X))]
new_index = [i * 2 for i in index]
X_new_index["index"] = new_index
mock_calculate_feature_matrix.return_value = pd.DataFrame({})
# check if _make_entity_set keeps the intended index
feature = DFSTransformer()
feature.fit(X_new_index)
feature.transform(X_new_index)
arg_es = mock_dfs.call_args[1]["entityset"].dataframes[0].index
arg_tr = mock_calculate_feature_matrix.call_args[1]["entityset"].dataframes[0].index
assert arg_es.to_list() == new_index
assert arg_tr.to_list() == new_index
# check if _make_entity_set fills in the proper index values
feature.fit(X_pd)
feature.transform(X_pd)
arg_es = mock_dfs.call_args[1]["entityset"].dataframes[0].index
arg_tr = mock_calculate_feature_matrix.call_args[1]["entityset"].dataframes[0].index
assert arg_es.to_list() == index
assert arg_tr.to_list() == index
def test_transform(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
es = ft.EntitySet()
es = es.add_dataframe(
dataframe_name="X", dataframe=X_pd, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_dataframe_name="X")
feature = DFSTransformer()
feature.fit(X)
X_t = feature.transform(X)
assert_frame_equal(feature_matrix, X_t)
assert features == feature.features
feature.fit(X, y)
feature.transform(X)
X_pd.ww.init()
feature.fit(X_pd)
feature.transform(X_pd)
def test_transform_subset(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
X_fit = X_pd.iloc[: len(X) // 3]
X_transform = X_pd.iloc[len(X) // 3 :]
es = ft.EntitySet()
es = es.add_dataframe(
dataframe_name="X", dataframe=X_transform, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_dataframe_name="X")
feature = DFSTransformer()
feature.fit(X_fit)
X_t = feature.transform(X_transform)
assert_frame_equal(feature_matrix, X_t)
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"], format="%Y%m%d")
),
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(
|
pd.Series(["a", "b", "a"], dtype="category")
|
pandas.Series
|
import argparse
import ast
import numpy as np
import pandas as pd
from data_preprocessing.data_creation import DataCreator
from data_preprocessing.text_utils import TextUtils
from md_data import create_annotations
from utils.data_io import DataSaverLoader
def create_dict_pl(questions_list, tokens_label):
"""When we create vocabulary with placeholders, no need to preprocess
sentences since they have already been preprocessed"""
tokens = [token for sentence in questions_list for token in sentence]
tokens.extend(list(tokens_label))
words = sorted(list(set(tokens)))
data_size, vocab_size = len(tokens), len(words)
print("Initialize dataset with {} characters, {} unique.".format(data_size, vocab_size))
word_to_ix = {ch: i + 1 for i, ch in enumerate(words)}
ix_to_word = {i + 1: ch for i, ch in enumerate(words)}
word_to_ix["UNK"] = len(word_to_ix) + 1
ix_to_word[len(ix_to_word) + 1] = "UNK"
return word_to_ix, ix_to_word
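# Illustrative sketch (toy input, not from the dataset): with
#     questions = [["where", "is", "sbj"], ["who", "wrote", "sbj"]]
#     labels = {"book", "author"}
# create_dict_pl(questions, labels) maps the sorted unique tokens to ids 1..N and adds "UNK" as id N+1.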
def create_dict_pl_separately(tokens):
"""This method will be called twice, once for the tokens in questions
and one for the tokens in predicate labels. Use this method if you do not
want to share embeddings for questions and predicates"""
words = sorted(list(set(tokens)))
data_size, vocab_size = len(tokens), len(words)
print("Initialize dataset with {} characters, {} unique.".format(data_size, vocab_size))
word_to_ix = {ch: i + 1 for i, ch in enumerate(words)}
ix_to_word = {i + 1: ch for i, ch in enumerate(words)}
word_to_ix["UNK"] = len(word_to_ix) + 1
ix_to_word[len(ix_to_word) + 1] = "UNK"
return word_to_ix, ix_to_word
def create_predicate_dictionaries(path):
train = pd.read_csv(path + "annotated_fb_data_train.txt", sep="\t", usecols=[1], names=["relation"])
valid = pd.read_csv(path + "annotated_fb_data_valid.txt", sep="\t", usecols=[1], names=["relation"])
test = pd.read_csv(path + "annotated_fb_data_test.txt", sep="\t", usecols=[1], names=["relation"])
train_relations = train["relation"].to_list()
valid_relations = valid["relation"].to_list()
test_relations = test["relation"].to_list()
train_relations.extend(valid_relations)
train_relations.extend(test_relations)
pred2ix, ix2pred, predicate_words, predicate_names = DataCreator.create_predicate_dict(train_relations)
return pred2ix, ix2pred, predicate_words, predicate_names
def create_vocab_dictionaries(args, placeholder=False, pred_w=None, pred_n=None, annotations=None, none_sbj=None,
separately=False, keywords=None, indices=[]):
"""
:param args: args
:param placeholder: placeholders exist or not
:param pred_w: predicate words
:param pred_n: predicate names
:param annotations: annotations
:param none_sbj: samples for which we do not have problematic mid
:param separately: use different embeddings for questions and predicates
:param keywords: use keywords or not
:param indices: target domain indices
:return: dictionaries word2ix, ix2word
"""
sbj_mid, predicate, obj_mid, question = DataCreator.get_spo_question(args.path_load_sq, "annotated_fb_data_train.txt")
# init
word2ix = None
ix2word = None
word2ix_predicates = None
ix2word_predicates = None
if (pred_w is None) and (not placeholder):
# for the multi-class models
word2ix, ix2word = DataCreator.create_dict(questions=question, eos=False, cut_frq=False, additional=keywords)
elif (pred_w is not None) and (keywords is None) and (not placeholder):
# for relatedness models without placeholders
print("Pred_w: ", len(pred_w), " Placeholder: ", placeholder)
tokens = [token for label_w in pred_w for token in label_w]
word2ix, ix2word = DataCreator.create_dict(questions=question, eos=False, cut_frq=False, additional=tokens)
elif (pred_w is not None) and (keywords is not None) and (not placeholder):
# for relatedness models without placeholders
# with word level predicate labels and keywords
print("Pred_w: ", len(pred_w), " Placeholder:", placeholder, " Keywords:", len(keywords))
tokens = [token for label_w in pred_w for token in label_w] + keywords
word2ix, ix2word = DataCreator.create_dict(questions=question, eos=False, cut_frq=False, additional=tokens)
elif (pred_w is not None) and (keywords is not None) and placeholder:
# for relatedness models with placeholders
# with word level predicate labels and keywords
print("Pred_w: ", len(pred_w), " Placeholder:", placeholder, " Keywords:", len(keywords))
question_words = []
for q in question:
question_words.append([token for token in TextUtils.preprocess(q)])
question = replace_plchdr(np.delete(np.array(question_words), none_sbj, axis=0), annotations)
# comment next line if there are no training indices to delete
print("delete unseen domain questions")
print(len(indices))
question = list(np.delete(np.array(question), indices, axis=0))
additional_predicate_txt = set([w for p_w in pred_w for w in p_w]).union(set(keywords))
word2ix, ix2word = create_dict_pl(question, additional_predicate_txt)
elif (pred_w is not None) and placeholder:
# for relatedness models with placeholders instead of subject entities in question
question_words = []
for q in question:
question_words.append([token for token in TextUtils.preprocess(q)])
question = replace_plchdr(np.delete(np.array(question_words), none_sbj, axis=0), annotations)
additional_predicate_txt = set([w for p_w in pred_w for w in p_w]) if pred_n is None else set(
[w for p_w in pred_w for w in p_w]).union(set([n for p_n in pred_n for n in p_n]))
if separately:
# create different vocabs for question and predicate labels
word2ix, ix2word = create_dict_pl_separately([token for sentence in question for token in sentence])
word2ix_predicates, ix2word_predicates = create_dict_pl_separately(additional_predicate_txt)
else:
# same vocab for question and predicate labels
# comment next line if there are no indices to delete
print("delete unseen domain questions")
print(len(indices))
question = list(np.delete(np.array(question), indices, axis=0))
word2ix, ix2word = create_dict_pl(question, additional_predicate_txt)
print("Dictionary size: ", len(word2ix))
return word2ix, ix2word, word2ix_predicates, ix2word_predicates
def remove_seq_duplicates(question):
""" Removes consequent sbj placeholders"""
i = 0
while i < len(question) - 1:
if question[i] == question[i + 1] and question[i] == "sbj":
del question[i]
else:
i = i + 1
return question
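# Worked example (toy tokens): ["who", "is", "sbj", "sbj", "author"] -> ["who", "is", "sbj", "author"];
# only consecutive "sbj" placeholders are collapsed, other repeated tokens are kept.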
def replace_plchdr(questions, annotations, name="train"):
questions_placeholder = []
for i in range(len(questions)):
tmp = questions[i]
sbj_exists = np.where(np.array(annotations[i]) == 1)[0]
if name == "test":
if len(sbj_exists) > 0:
q_new = [word if index not in sbj_exists else "sbj" for index, word in enumerate(tmp)]
q_new = remove_seq_duplicates(q_new)
questions_placeholder.append(q_new)
else:
questions_placeholder.append(tmp)
else:
tmp[sbj_exists[0]] = "sbj"
if len(sbj_exists) > 1:
tmp_ = np.delete(tmp, sbj_exists[1:])
questions_placeholder.append(tmp_)
else:
questions_placeholder.append(tmp)
return questions_placeholder
def create_data_pl(questions_list, word_to_ix):
sentence_ids = []
for q in questions_list:
sentence_ids.append([word_to_ix[token]
if token in word_to_ix else word_to_ix["UNK"] for token in q])
return sentence_ids
def create_data(path, word2ix, pred2ix):
"""
:param path: path for an annotated freebase data file
:param word2ix: word to id
:param pred2ix: predicate to id
:return: subject mids, predicates, object mids, data(questions as word ids), targets(predicate ids), questions
"""
df_data = pd.read_csv(path, usecols=[0, 1, 2, 3, 4, 6])
sbj_mid = df_data["subject"].to_list()
obj_mid = df_data["object"].to_list()
predicate = df_data["relation"].to_list()
annotations = df_data["annotation"].apply(ast.literal_eval).to_list()
question = df_data["question_initial"].to_list()
data, question = DataCreator.create_data(question, word2ix, eos=False)
targets = DataCreator.create_targets(predicate, pred2ix)
return sbj_mid, predicate, obj_mid, data, targets, question
def create_data_placeholder(path, word2ix, pred2ix, name):
"""Takes as input the path for an annotated freebase data file, and returns
subject mids, predicates, object mids, data(questions as word ids), targets(predicate ids), questions"""
use_col = [0, 1, 2, 3, 4, 6, 7] if name == "test" else [0, 1, 2, 3, 4, 6]
df_data = pd.read_csv(path, usecols=use_col)
sbj_mid = df_data["subject"].to_list()
obj_mid = df_data["object"].to_list()
predicate = df_data["relation"].to_list()
annotations = df_data["annotation"].apply(ast.literal_eval).to_list() if name != "test" else df_data[
"prediction"].apply(ast.literal_eval).to_list()
question = df_data["question_initial"].to_list()
question_words = []
for q in question:
question_words.append([token for token in TextUtils.preprocess(q)])
question = replace_plchdr(np.array(question_words), annotations, name)
data = create_data_pl(question, word2ix)
targets = DataCreator.create_targets(predicate, pred2ix)
return sbj_mid, predicate, obj_mid, data, targets, question
def find_remove_domain_ids(domain, ix2pred):
""" finds the relation types within the target domain """
ids = []
for key, value in ix2pred.items():
value_ = value.split("/")[0]
if value_ in [domain]:
ids.append(key)
return ids
def create_targets(predicates, pred_to_ix):
train_targets = list()
for p in predicates:
train_targets.append(np.array(pred_to_ix[p.replace("\n", "").replace("www.freebase.com/", "")]).reshape(1))
return train_targets
def preprocess_synthetic_questions(df, mid2entity, predicate_names):
""" adds placeholders on the synthetic questions """
add_questions = []
additional_quest = []
for i in range(len(df)):
reference = [df["y_label_post_proc"][i].replace("_END_", "").replace("_START_", "").split()]
candidate = df["y_post_proc"][i].replace("_END_", "").replace("_START_", "").split()
new_cand = []
if "_PLACEHOLDER_SUB_" not in candidate:
tmp = mid2entity[df["sub"][i].replace("www.freebase.com/m/", "")]
if len(tmp) != 0:
annotations_padded, wrong_num_of_ent, wrong_num_of_ent_num = create_annotations([candidate], mid2entity[
df["sub"][i].replace("www.freebase.com/m/", "")], [])
if 1 in annotations_padded[0]:
inds = [index for index, x in enumerate(annotations_padded[0]) if x == 1]
candidate = ["_PLACEHOLDER_SUB_" if index in inds else word for index, word in
enumerate(candidate)]
for word in candidate:
if word == "_PLACEHOLDER_SUB_" and "sbj" not in new_cand:
new_cand.append("sbj")
elif word != "_PLACEHOLDER_SUB_":
new_cand.append(word)
new_cand = TextUtils.preprocess(" ".join(new_cand))
add_questions.extend(new_cand)
additional_quest.append(new_cand)
for pp in predicate_names:
add_questions.extend(pp)
return add_questions, additional_quest
def check_args(args, parser):
if args.use_synthetic_questions and (args.path_load_synthetic is None):
parser.error("when --use_synthetic_questions, you must provide --path_load_synthetic")
if args.use_keywords and (args.path_load_keywords is None):
parser.error("when --use_keywords, you must provide --path_load_keywords")
if args.use_relation_words_only and (args.use_keywords or args.use_synthetic_questions):
parser.error("when --use_relation_words_only, --use_synthetic_questions | --use_keywords"
"should not be given as arguments")
def main():
"""
path_load_sq: path of the original SimpleQuestions dataset
path_load_md_data: path of the folder generated after running MD
path_load_synthetic: path of the csv file generated by QG
    path_load_mid2ent: path of the folder in which mid2ent exists
    path_load_keywords: path of the folder in which pred2key exists (this file is created when extracting keywords)
path_save: where to save the RP data
target_domain: the target domain (e.g. book, film, astronomy etc)
    placeholders: use placeholders in questions instead of subject names (otherwise questions keep their original format)
    use_relation_words_only: use only words from the original questions; not to be used with keywords or synthetic questions
use_keywords: if keywords w.r.t relation will be provided
use_synthetic_questions: if synthetic questions of the target domain will be provided
"""
parser = argparse.ArgumentParser()
parser.add_argument("--path_load_sq", type=str, default=None, required=True)
parser.add_argument("--path_load_md_data", type=str, default=None)
parser.add_argument("--path_load_synthetic", type=str, default=None)
parser.add_argument("--path_load_mid2ent", type=str, default=None, required=True)
parser.add_argument("--path_load_keywords", type=str, default=None)
parser.add_argument("--path_save", type=str, default=None, required=True)
parser.add_argument("--target_domain", type=str, default=None)
parser.add_argument("--placeholders", action="store_true")
parser.add_argument("--use_relation_words_only", action="store_true")
parser.add_argument("--use_keywords", action="store_true")
parser.add_argument("--use_synthetic_questions", action="store_true")
args = parser.parse_args()
# check if args are provided correctly
check_args(args, parser)
save_path = args.path_save
# load mid to entity dictionary
mid2entity = DataSaverLoader.load_pickle(path=args.path_load_mid2ent, filename="mid2ent")
# if args.placeholders:
# # load train annotations only for the placeholder case (plc in questions)
df_train = pd.read_csv(args.path_load_md_data + "/train/" + "/data.csv", usecols=[4])
train_annotations = df_train["annotation"].apply(ast.literal_eval).to_list()
train_none_sbj = DataSaverLoader.load_pickle(path=args.path_load_md_data + "/train/", filename="none_sbj")
# create predicate label to id, id to predicate dictionaries
# and word level predicate labels and name level predicate labels list
pred2ix, ix2pred, predicate_words, predicate_names = create_predicate_dictionaries(args.path_load_sq)
if args.use_keywords:
# load keywords so they can be included in the vocab
predicate2keywords = DataSaverLoader.load_pickle(path=args.path_load_keywords, filename="pred2key")
keywords_total = []
for keywords in predicate2keywords.values():
keywords_total.extend(keywords)
# indices to delete, if there is no domain given it will remain empty
indices = []
if args.target_domain is not None:
# find the training samples of the target domain which appear in the initial training set
# those samples need to be removed for the domain adaptation scenario, otherwise we have
# information leakage
train = pd.read_csv(args.path_load_sq + "annotated_fb_data_train.txt", sep="\t", usecols=[1], names=["relation"])
labels_train = create_targets(train["relation"].to_list(), pred2ix)
# find the relations types of which are part of the target domain
rem = find_remove_domain_ids(args.target_domain, ix2pred)
for indx, v in enumerate(labels_train):
if v[0] in rem:
indices.append(indx)
indices_to_delete = np.zeros(len(labels_train))
indices_to_delete[indices] = 1
indices_to_delete = np.delete(indices_to_delete, train_none_sbj, 0)
indices = np.where(indices_to_delete == 1)[0]
if args.use_synthetic_questions and args.placeholders:
# the text of the target domain synthetic questions should be part of the final vocabulary
path_noisy_q = args.path_load_synthetic
new_q = pd.read_csv(path_noisy_q)
add_questions, additional_quest = preprocess_synthetic_questions(new_q, mid2entity, predicate_names)
elif args.use_synthetic_questions and not args.placeholders:
path_noisy_q = args.path_load_synthetic
new_q =
|
pd.read_csv(path_noisy_q)
|
pandas.read_csv
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
        # GH 30114, this is to test ignore_index on MultiIndex of index
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
Timestamp("20130101"),
Timestamp("20130103"),
|
Timestamp("20130102")
|
pandas.Timestamp
|
import pandas as pd
import numpy as np
import unittest
class TestSeriesSub(unittest.TestCase):
"""
Test the Pandas.Series.sub function
    sub() subtracts, element-wise and aligned on index, the Series passed as the 'other' argument from the Series it is called on.
    This test focuses only on the 'other' parameter and the Series the method is called on.
    The other parameters are not set and are tested with their default values.
"""
def setUp(self):
# regular subtraction
self.series1a = pd.Series([4,1,2,3])
self.series1b = pd.Series([6,9,4,8])
self.series1c = pd.Series([-2,-8,-2,-5])
# subtraction with NaN
self.series2a = pd.Series([1,np.nan,2,3])
self.series2b =
|
pd.Series([4,np.nan,np.nan,6])
|
pandas.Series
|
import asyncio
import json
import logging
import math
import os
import sys
from argparse import ArgumentParser
from concurrent.futures import CancelledError
from csv import DictWriter
from datetime import date
from io import StringIO
from itertools import chain
from pathlib import Path
from urllib.parse import urlencode
import aiofiles
import aiohttp
import pandas as pd
import numpy as np
from geopy.distance import vincenty
# assumed missing import: `config` used below appears to come from python-decouple
from decouple import config
DTYPE = dict(cnpj=np.str, cnpj_cpf=np.str)
LOG_FORMAT = '[%(levelname)s] %(asctime)s: %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
class GooglePlacesURL:
BASE_URL = 'https://maps.googleapis.com/maps/api/place/'
def __init__(self, key):
self.key = key
def url(self, endpoint, query=None, format='json'):
"""
:param endpoint: (str) Google Places API endpoint name (e.g. details)
:param query: (tuple) tuples with key/values pairs for the URL query
:param format: (str) output format (default is `json`)
:return: (str) URL to do an authenticated Google Places request
"""
key = ('key', self.key)
        query = tuple(chain(query, (key,))) if query else (key,)
parts = (
self.BASE_URL,
endpoint,
'/{}?'.format(format),
urlencode(query)
)
return ''.join(parts)
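    # Illustrative result (hypothetical key="KEY"):
    #   url('details', (('placeid', 'abc'),))
    #   -> 'https://maps.googleapis.com/maps/api/place/details/json?placeid=abc&key=KEY'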
def details(self, place):
"""
:param place: (int or str) ID of the place in Google Place
:return: (str) URL to do a place details Google Places search
"""
query = (('placeid', place),)
return self.url('details', query)
def nearby(self, keyword, location):
"""
        :param keyword: (str) category to search places
:return: (str) URL to do a nearby Google Places search
"""
query = (
('location', location),
('keyword', keyword),
('rankby', 'distance'),
)
return self.url('nearbysearch', query)
class SexPlacesNearBy:
KEYWORDS = ('acompanhantes',
'adult entertainment club',
'adult entertainment store',
'gay sauna',
'massagem',
'modeling agency',
'motel',
'night club',
'sex club',
'sex shop',
'strip club',
'swinger clubs')
def __init__(self, company, key=None):
"""
:param company: (dict) Company with name, cnpj, latitude and longitude
:param key: (str) Google Places API key
"""
self.url = GooglePlacesURL(key or config('GOOGLE_API_KEY'))
self.company = company
self.latitude = self.company['latitude']
self.longitude = self.company['longitude']
self.places = []
self.closest = None
@property
def company_name(self):
return self.company.get('trade_name') or self.company.get('name')
@property
def valid(self):
try:
            # build the list eagerly so a bad value raises inside this try block
            coords = [float(x) for x in (self.latitude, self.longitude)]
except ValueError:
return False
if any(map(math.isnan, coords)):
return False
return True
async def get_closest(self):
"""
Start the requests to store a place per self.KEYWORD in self.places.
Then gets the closest place found, queries for its details and returns
a dict with the details for that place.
"""
if not self.valid:
msg = 'No geolocation information for company: {} ({})'
logging.info(msg.format(self.company_name, self.company['cnpj']))
return None
tasks = [self.load_place(k) for k in self.KEYWORDS]
await asyncio.gather(*tasks)
ordered = sorted(self.places, key=lambda x: x['distance'])
for place in ordered:
place = await self.load_details(place)
name = place.get('name', '').lower()
if place.get('keyword') == 'motel' and 'hotel' in name:
pass # google returns hotels when looking for a motel
else:
prefix = '💋 ' if place.get('distance') < 5 else ''
msg = '{}Found something interesting {:.2f}m away from {}…'
args = (prefix, place.get('distance'), self.company_name)
logging.info(msg.format(*args))
self.closest = place
return place
async def load_place(self, keyword, print_status=False):
"""
Given a keyword it loads the place returned by the API to self.places.
"""
if print_status:
msg = 'Looking for a {} near {} ({})…'
args = (keyword, self.company_name, self.company.get('cnpj'))
logging.info(msg.format(*args))
location = '{},{}'.format(self.latitude, self.longitude)
url = self.url.nearby(keyword, location)
try:
response = await aiohttp.request('GET', url)
except aiohttp.TimeoutError:
logging.info('Timeout raised for {}'.format(url))
else:
content = await response.text()
place = self.parse(keyword, content)
if place and isinstance(place.get('distance'), float):
self.places.append(place)
def parse(self, keyword, content):
"""
Return a dictionary with information of the nearest sex place
around a given company.
:param keyword: (str) keyword used by the request
:param content: (str) content of the response to the request
:return: (dict) with
* name : The name of nearest sex place
* latitude : The latitude of nearest sex place
* longitude : The longitude of nearest sex place
* distance : Distance (in meters) between the company and the
nearest sex place
* address : The address of the nearest sex place
* phone : The phone of the nearest sex place
* id : The Google Place ID of the nearest sex place
* keyword : term that matched the sex place in Google Place Search
Google responses:
* `OK` indicates that no errors occurred; the place was
successfully detected and at least one result was returned.
* `UNKNOWN_ERROR` indicates a server-side error; trying again may
be successful.
* `ZERO_RESULTS` indicates that the reference was valid but no
longer refers to a valid result. This may occur if the
establishment is no longer in business.
* `OVER_QUERY_LIMIT` indicates that you are over your quota.
* `REQUEST_DENIED` indicates that your request was denied,
          generally because of a missing or invalid key parameter.
* `INVALID_REQUEST` generally indicates that the query (reference)
is missing.
* `NOT_FOUND` indicates that the referenced location was not found
          in the Places database.
        Source: https://developers.google.com/places/web-service/details
"""
response = json.loads(content)
status = response.get('status')
if status != 'OK':
if status in ('OVER_QUERY_LIMIT', 'REQUEST_DENIED'):
shutdown() # reached API limit or API key is missing/wrong
if status != 'ZERO_RESULTS':
error = response.get('error', '')
msg = 'Google Places API Status: {} {}'.format(status, error)
logging.info(msg.strip())
return None
place, *_ = response.get('results', [{}])
location = place.get('geometry', {}).get('location', {})
latitude = float(location.get('lat'))
longitude = float(location.get('lng'))
company_location = (self.latitude, self.longitude)
place_location = (latitude, longitude)
distance = vincenty(company_location, place_location)
return {
'id': place.get('place_id'),
'keyword': keyword,
'latitude': latitude,
'longitude': longitude,
'distance': distance.meters,
'cnpj': self.company.get('cnpj'),
'company_name': self.company.get('name'),
'company_trade_name': self.company.get('trade_name')
}
async def load_details(self, place):
"""
:param place: dictionary with id key.
:return: dictionary updated with name, address and phone.
"""
place_id = place.get('id')
if not place_id:
return place
# request place details
try:
response = await aiohttp.request('GET', self.url.details(place_id))
except aiohttp.TimeoutError:
            logging.info('Timeout raised for {}'.format(self.url.details(place_id)))
return place
else:
content = await response.text()
# parse place details
try:
details = json.loads(content)
except ValueError:
return place
else:
if not details:
return place
result = details.get('result', {})
place.update(dict(
name=result.get('name', ''),
address=result.get('formatted_address', ''),
phone=result.get('formatted_phone_number', '')
))
return place
async def write_to_csv(path, place=None, **kwargs):
"""
Receives a given place (dict) and writes it in the CSV format into path.
CSV headers are defined in `fields`. The named argument `headers`
(bool) determines if the functions write the CSV header or not.
"""
headers = kwargs.get('headers', False)
if not place and not headers:
return
fields = (
'id', 'keyword', 'latitude', 'longitude', 'distance', 'name',
'address', 'phone', 'cnpj', 'company_name', 'company_trade_name'
)
with StringIO() as obj:
writer = DictWriter(obj, fieldnames=fields, extrasaction='ignore')
if headers:
writer.writeheader()
if place:
writer.writerow(place)
async with aiofiles.open(path, mode='a') as fh:
await fh.write(obj.getvalue())
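# Note: the output file is opened in append mode, so the CSV header is written only
# once (by the initial call with headers=True) and every later call appends one row.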
async def fetch_place(company, output, semaphore):
"""
Gets a company (dict), finds the closest place nearby and write the result
to a CSV file.
"""
with (await semaphore):
places = SexPlacesNearBy(company)
await places.get_closest()
if places.closest:
await write_to_csv(output, places.closest)
async def main_coro(companies, output, max_requests):
"""
:param companies: (Pandas DataFrame)
:param output: (str) Path to the CSV output
:param max_requests: (int) max parallel requests
"""
# write CSV headers
if is_new_dataset(output) and not companies.empty:
await write_to_csv(output, headers=True)
semaphore = asyncio.Semaphore(max_requests // 13) # 13 reqs per company
tasks = []
logging.info("Let's get started!")
# write CSV data
for company_row in companies.itertuples(index=True):
company = dict(company_row._asdict()) # _asdict() returns OrderedDict
tasks.append(fetch_place(company, output, semaphore))
await asyncio.wait(tasks)
def find_newest_file(pattern='*.*', source_dir='.'):
"""
Assuming that the files will be in the form of:
yyyy-mm-dd-type-of-file.xz we can try to find the newest file
based on the date.
"""
files = sorted(Path(source_dir).glob(pattern))
if not files:
return None
file = files.pop()
if not file:
return None
return str(file)
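# Note: because the expected names start with an ISO date (e.g. a hypothetical
# '2017-05-21-companies.xz'), lexicographic order is also chronological, so the
# last element of the sorted glob is the newest file.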
def load_newest_dataset(pattern, usecols, na_value=''):
filepath = find_newest_file(pattern)
if not filepath:
return None
logging.info('Loading {}'.format(filepath))
dataset = pd.read_csv(
filepath,
dtype=DTYPE,
low_memory=False,
usecols=usecols
)
dataset = dataset.fillna(value=na_value)
return dataset
def get_companies(companies_path, **kwargs):
"""
Compares YYYY-MM-DD-companies.xz with the newest
YYYY-MM-DD-sex-place-distances.xz and returns a DataFrame with only
the rows matching the search criteria, excluding already fetched companies.
Keyword arguments are expected: term (int), value (float) and city (str)
"""
filters = tuple(map(kwargs.get, ('term', 'value', 'city')))
if not all(filters):
raise TypeError('get_companies expects term, value and city as kwargs')
term, value, city = filters
# load companies
cols = ('cnpj', 'trade_name', 'name', 'latitude', 'longitude', 'city')
companies = load_newest_dataset(companies_path, cols)
companies['cnpj'] = companies['cnpj'].str.replace(r'\D', '')
    # load & filter reimbursements
cols = ('total_net_value', 'cnpj_cpf', 'term')
reimbursements = load_newest_dataset('data/*-reimbursements.xz', cols)
query = '(term == {}) & (total_net_value >= {})'.format(term, value)
reimbursements = reimbursements.query(query)
# load & filter companies
on = dict(left_on='cnpj', right_on='cnpj_cpf')
companies = pd.merge(companies, reimbursements, **on)
del(reimbursements)
companies.drop_duplicates('cnpj', inplace=True)
query = 'city.str.upper() == "{}"'.format(city.upper())
companies = companies.query(query)
# clean up companies
del(companies['cnpj_cpf'])
del(companies['term'])
del(companies['total_net_value'])
del(companies['city'])
# load sexplaces & filter remaining companies
cols = ('cnpj', )
sex_places = load_newest_dataset('data/*-sex-place-distances.xz', cols)
if sex_places is None or sex_places.empty:
return companies
return companies[~companies.cnpj.isin(sex_places.cnpj)]
def is_new_dataset(output):
sex_places = find_newest_file('*sex-place-distances.xz', 'data')
if not sex_places:
return True
# convert previous database from xz to csv
|
pd.read_csv(sex_places, dtype=DTYPE)
|
pandas.read_csv
|
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, blk.ndim)
values =
|
ensure_wrapped_if_datetimelike(values)
|
pandas.core.construction.ensure_wrapped_if_datetimelike
|
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import stacknet_funcs as funcs
from math import ceil
import numpy as np
from os import chdir
folder = "F:/Nerdy Stuff/Kaggle/Talking data/data/"
sparse_array_path = 'F:/Nerdy Stuff/Kaggle/Talking data/sparse matricies/'
predictors = []
run = "test"
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint8',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
if run == "train":
file = folder + "train.csv"
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed']
print('loading %s data...' % (run))
base_df = pd.read_csv(file, parse_dates=['click_time'], low_memory=True,dtype=dtypes, usecols=cols)
if run == "test":
print('loading %s data...' % (run))
file = folder + "test.csv"
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time']
base_df = pd.read_csv(file, parse_dates=['click_time'], dtype=dtypes, usecols=cols)
rows = base_df.shape[0]
iters = 100
iter_rows = ceil(rows/iters)
X_ttl = np.empty((0, 31))
y_ttl = np.empty((0, ))
start_point = 0
for i in list(range(start_point, iters)):
print("Cut # %i" % (i))
if i == 0:
start = i * iter_rows
end = (i + 1) * iter_rows
print("start row = %s and end row = %s" % (start, end))
df = base_df.iloc[start:end, :].copy()
else:
start = i * iter_rows + 1
end = (i + 1) * iter_rows
print("start row = %s and end row = %s" % (start, end))
df = base_df.iloc[start:end, :].copy()
df['hour'] = pd.to_datetime(df.click_time).dt.hour.astype('int8')
df['day'] =
|
pd.to_datetime(df.click_time)
|
pandas.to_datetime
|
import os, sys
import pandas as pd
import argparse
sys.path.insert(0, os.path.abspath("../../"))
from constants import COL_HEADERS_TEST
from common import *
def get_score_at_percentile(f, p):
data = pd.read_csv(f)
data = data[data['percentile'] == p]
score = data.iloc[0]['score']
print("data at p: \n", data)
print("score: ", score)
return score
# get the scores below a threshold
# then get the windows associated with these scores
def get_windows_below_thresh(f, thresh):
data =
|
pd.read_csv(f)
|
pandas.read_csv
|
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 = pd.read_csv('machine/Calling/Sensors_2.csv')
c3 = pd.read_csv('machine/Calling/Sensors_3.csv')
c4 = pd.read_csv('machine/Calling/Sensors_4.csv')
c5 = pd.read_csv('machine/Calling/Sensors_5.csv')
c6 = pd.read_csv('machine/Calling/Sensors_6.csv')
c7 = pd.read_csv('machine/Calling/Sensors_7.csv')
c8 = pd.read_csv('machine/Calling/Sensors_8.csv')
c9 = pd.read_csv('machine/Calling/Sensors_9.csv')
c10 = pd.read_csv('machine/Calling/Sensors_10.csv')
calling = pd.concat([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10], axis = 0)
t1 = pd.read_csv('machine/Texting/Sensors_1.csv')
t2 = pd.read_csv('machine/Texting/Sensors_2.csv')
t3 = pd.read_csv('machine/Texting/Sensors_3.csv')
t4 = pd.read_csv('machine/Texting/Sensors_4.csv')
t5 = pd.read_csv('machine/Texting/Sensors_5.csv')
t6 = pd.read_csv('machine/Texting/Sensors_6.csv')
t7 = pd.read_csv('machine/Texting/Sensors_7.csv')
t8 = pd.read_csv('machine/Texting/Sensors_8.csv')
t9 = pd.read_csv('machine/Texting/Sensors_9.csv')
t10 = pd.read_csv('machine/Texting/Sensors_10.csv')
texting = pd.concat([t1,t2,t3,t4,t5,t6,t7,t8,t9,t10], axis = 0)
s1 = pd.read_csv('machine/Swinging/Sensors_1.csv')
s2 = pd.read_csv('machine/Swinging/Sensors_2.csv')
s3 = pd.read_csv('machine/Swinging/Sensors_3.csv')
s4 =
|
pd.read_csv('machine/Swinging/Sensors_4.csv')
|
pandas.read_csv
|
"""Utilities for working with MTF data."""
import operator
from scipy.interpolate import griddata, RegularGridInterpolator as RGI
from .mathops import engine as e
from .util import share_fig_ax
from .io import read_trioptics_mtf_vs_field, read_trioptics_mtfvfvf
class MTFvFvF(object):
"""Abstract object representing a cube of MTF vs Field vs Focus data.
Attributes
----------
azimuth : `str`
Azimuth associated with the data
data : `numpy.ndarray`
3D array of data in shape (focus, field, freq)
field : `numpy.ndarray`
array of fields associated with the field axis of data
focus : `numpy.ndarray`
array of focus associated with the focus axis of data
freq : `numpy.ndarray`
array of frequencies associated with the frequency axis of data
"""
def __init__(self, data, focus, field, freq, azimuth):
"""Create a new MTFvFvF object.
Parameters
----------
data : `numpy.ndarray`
3D array in the shape (focus,field,freq)
focus : `iterable`
1D set of the column units, in microns
field : `iterable`
1D set of the row units, in any units
freq : `iterable`
1D set of the z axis units, in cy/mm
azimuth : `string` or `float`
azimuth this data cube is associated with
"""
self.data = data
self.focus = focus
self.field = field
self.freq = freq
self.azimuth = azimuth
def plot2d(self, freq, symmetric=False, contours=True, interp_method='lanczos', fig=None, ax=None):
"""Create a 2D plot of the cube, an "MTF vs Field vs Focus" plot.
Parameters
----------
freq : `float`
frequency to plot, will be rounded to the closest value present in the self.freq iterable
symmetric : `bool`
make the plot symmetric by mirroring it about the x-axis origin
contours : `bool`
plot contours
interp_method : `string`
interpolation method used for the plot
fig : `matplotlib.figure.Figure`, optional:
Figure to plot inside
ax : `matplotlib.axes.Axis`, optional:
Axis to plot inside
Returns
-------
fig : `matplotlib.figure.Figure`
figure containing the plot
axis : `matplotlib.axes.Axis`
axis containing the plot
"""
ext_x = [self.field[0], self.field[-1]]
ext_y = [self.focus[0], self.focus[-1]]
freq_idx = e.searchsorted(self.freq, freq)
# if the plot is symmetric, mirror the data
if symmetric is True:
dat = e.concatenate((self.data[:, ::-1, freq_idx], self.data[:, :, freq_idx]), axis=1)
ext_x[0] = ext_x[1] * -1
else:
dat = self.data[:, :, freq_idx]
ext = [ext_x[0], ext_x[1], ext_y[0], ext_y[1]]
fig, ax = share_fig_ax(fig, ax)
im = ax.imshow(dat,
extent=ext,
origin='lower',
cmap='inferno',
clim=(0, 1),
interpolation=interp_method,
aspect='auto')
if contours is True:
contours = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
cs = ax.contour(dat, contours, colors='0.15', linewidths=0.75, extent=ext)
ax.clabel(cs, fmt='%1.1f', rightside_up=True)
fig.colorbar(im, label=f'MTF @ {freq} cy/mm', ax=ax, fraction=0.046)
ax.set(xlim=(ext_x[0], ext_x[1]), xlabel='Image Height [mm]',
ylim=(ext_y[0], ext_y[1]), ylabel=r'Focus [$\mu$m]')
return fig, ax
def plot_thrufocus_singlefield(self, field, freqs=(10, 20, 30, 40, 50), _range=100, fig=None, ax=None):
"""Create a plot of Thru-Focus MTF for a single field point.
Parameters
----------
field : `float`
which field point to plot, in same units as self.field
freqs : `iterable`
frequencies to plot, will be rounded to the closest values present in the self.freq iterable
_range : `float`
+/- focus range to plot, symmetric
fig : `matplotlib.figure.Figure`, optional
Figure to plot inside
ax : `matplotlib.axes.Axis`
Axis to plot inside
Returns
-------
fig : `matplotlib.figure.Figure`, optional
figure containing the plot
axis : `matplotlib.axes.Axis`
axis containing the plot
"""
field_idx = e.searchsorted(self.field, field)
freq_idxs = [e.searchsorted(self.freq, f) for f in freqs]
range_idxs = [e.searchsorted(self.focus, r) for r in (-_range, _range)]
xaxis_pts = self.focus[range_idxs[0]:range_idxs[1]]
mtf_arrays = []
for idx, freq in zip(freq_idxs, freqs):
data = self.data[range_idxs[0]:range_idxs[1], field_idx, idx]
mtf_arrays.append(data)
fig, ax = share_fig_ax(fig, ax)
for data, freq in zip(mtf_arrays, freqs):
ax.plot(xaxis_pts, data, label=freq)
ax.legend(title=r'$\nu$ [cy/mm]')
ax.set(xlim=(xaxis_pts[0], xaxis_pts[-1]), xlabel=r'Focus [$\mu m$]',
ylim=(0, 1), ylabel='MTF [Rel. 1.0]')
return fig, ax
def trace_focus(self, algorithm='avg'):
"""Find the focus position in each field.
This is, in effect, the "field curvature" for this azimuth.
Parameters
----------
algorithm : `str`
algorithm to use to trace focus, currently only supports '0.5', see
notes for a description of this technique
Returns
-------
field : `numpy.ndarray`
array of field values, mm
focus : `numpy.ndarray`
array of focus values, microns
Notes
-----
Algorithm '0.5' uses the frequency that has its peak closest to 0.5
        on-axis to estimate the focus corresponding to the minimum RMS WFE
condition. This is based on the following assumptions:
- Any combination of third, fifth, and seventh order spherical
aberration will produce a focus shift that depends on
frequency, and this dependence can be well fit by an
equation of the form y(x) = ax^2 + bx + c. If this is true,
then the frequency which peaks at 0.5 will be near the
vertex of the quadratic, which converges to the min RMS WFE
condition.
- Coma, while it enhances depth of field, does not shift the
focus peak.
- Astigmatism and field curvature are the dominant cause of any
shift in best focus with field.
- Chromatic aberrations do not influence the thru-focus MTF peak
in a way that varies with field.
Raises
------
ValueError
if an unsupported algorithm is entered
"""
if algorithm == '0.5':
            # locate the on-axis field index, then the frequency whose peak is closest to 0.5
idx_axis = e.searchsorted(self.field, 0)
idx_freq = abs(self.data[:, idx_axis, :].max(axis=0) - 0.5).argmin(axis=0)
focus_idx = self.data[:, e.arange(self.data.shape[1]), idx_freq].argmax(axis=0)
return self.field, self.focus[focus_idx],
elif algorithm.lower() in ('avg', 'average'):
if self.freq[0] == 0:
# if the zero frequency is included, exclude it from our calculations
avg_idxs = self.data.argmax(axis=0)[:, 1:].mean(axis=1)
else:
avg_idxs = self.data.argmax(axis=0).mean(axis=1)
# account for fractional indexes
focus_out = avg_idxs.copy()
for i, idx in enumerate(avg_idxs):
li, ri = int(e.floor(idx)), int(e.ceil(idx))
lf, rf = self.focus[li], self.focus[ri]
diff = rf - lf
part = idx % 1
focus_out[i] = lf + diff * part
return self.field, focus_out
else:
            raise ValueError("only the '0.5' and 'avg' algorithms are supported")
def __arithmatic_bus__(self, other, op):
"""Core checking and return logic for arithmatic operations."""
if type(other) == type(self):
# both MTFvFvFs, check alignment of data
same_x = e.allclose(self.field, other.field)
same_y = e.allclose(self.focus, other.focus)
same_freq = e.allclose(self.freq, other.freq)
            if not (same_x and same_y and same_freq):
raise ValueError('x or y coordinates or frequencies mismatch between MTFvFvFs')
else:
target = other.data
elif type(other) in {int, float}:
target = other
else:
raise ValueError('MTFvFvFs can only be added to each other')
op = getattr(operator, op)
data = op(self.data, target)
return MTFvFvF(data, self.focus, self.field, self.freq, self.azimuth)
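    # Illustrative behaviour: for two aligned cubes, (cube_a - cube_b) yields a new
    # MTFvFvF whose .data is cube_a.data - cube_b.data; scalars broadcast, e.g.
    # cube_a * 0.5 halves the MTF everywhere.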
def __add__(self, other):
"""Add something to an MTFvFvF."""
return self.__arithmatic_bus__(other, 'add')
def __sub__(self, other):
"""Subtract something from an MTFvFvF."""
return self.__arithmatic_bus__(other, 'sub')
def __mul__(self, other):
"""Multiply an MTFvFvF by something."""
return self.__arithmatic_bus__(other, 'mul')
def __truediv__(self, other):
"""Divide an MTFvFvF by something."""
return self.__arithmatic_bus__(other, 'truediv')
def __imul__(self, other):
"""Multiply an MTFvFvF by something in-place."""
if type(other) not in {int, float}:
raise ValueError('can only mul by ints and floats')
self.data *= other
return self
def __itruediv__(self, other):
"""Divide an MTFvFvF by something in-place."""
if type(other) not in {int, float}:
raise ValueError('can only div by ints and floats')
self.data /= other
return self
@staticmethod
def from_dataframe(df):
"""Return a pair of MTFvFvF objects for the tangential and one for the sagittal MTF.
Parameters
----------
df : `pandas.DataFrame`
a dataframe with columns Focus, Field, Freq, Azimuth, MTF
Returns
-------
t_cube : `MTFvFvF`
tangential MTFvFvF
s_cube : `MTFvFvF`
sagittal MTFvFvF
"""
# copy the dataframe for manipulation
df = df.copy()
df.Fields = df.Field.round(4)
df.Focus = df.Focus.round(6)
sorted_df = df.sort_values(by=['Focus', 'Field', 'Freq'])
T = sorted_df[sorted_df.Azimuth == 'Tan']
S = sorted_df[sorted_df.Azimuth == 'Sag']
focus = e.unique(df.Focus.values)
fields = e.unique(df.Fields.values)
freqs = e.unique(df.Freq.values)
d1, d2, d3 = len(focus), len(fields), len(freqs)
t_mat = T.MTF.values.reshape((d1, d2, d3))
s_mat = S.MTF.values.reshape((d1, d2, d3))
t_cube = MTFvFvF(data=t_mat, focus=focus, field=fields, freq=freqs, azimuth='Tan')
s_cube = MTFvFvF(data=s_mat, focus=focus, field=fields, freq=freqs, azimuth='Sag')
return t_cube, s_cube
@staticmethod
def from_trioptics_file(file_path):
"""Create a new MTFvFvF object from a trioptics file.
Parameters
----------
file_path : path_like
path to a file
Returns
-------
`MTFvFvF`
new MTFvFvF object
"""
return MTFvFvF(**read_trioptics_mtfvfvf(file_path))
def mtf_ts_extractor(mtf, freqs):
"""Extract the T and S MTF from a PSF object.
Parameters
----------
mtf : `MTF`
MTF object
freqs : iterable
set of frequencies to extract
Returns
-------
tan : `numpy.ndarray`
array of tangential MTF values
sag : `numpy.ndarray`
array of sagittal MTF values
"""
tan = mtf.exact_tan(freqs)
sag = mtf.exact_sag(freqs)
return tan, sag
def mtf_ts_to_dataframe(tan, sag, freqs, field=0, focus=0):
"""Create a Pandas dataframe from tangential and sagittal MTF data.
Parameters
----------
tan : `numpy.ndarray`
vector of tangential MTF data
sag : `numpy.ndarray`
vector of sagittal MTF data
freqs : iterable
vector of spatial frequencies for the data
field : `float`
relative field associated with the data
focus : `float`
focus offset (um) associated with the data
Returns
-------
pandas dataframe.
"""
import pandas as pd
rows = []
for f, t, s in zip(freqs, tan, sag):
base_dict = {
'Field': field,
'Focus': focus,
'Freq': f,
}
rows.append({**base_dict, **{
'Azimuth': 'Tan',
'MTF': t,
}})
rows.append({**base_dict, **{
'Azimuth': 'Sag',
'MTF': s,
}})
return
|
pd.DataFrame(data=rows)
|
pandas.DataFrame
|
import pytest
import inspect
try:
import pandas as pd
import test_aide.pandas as ph
has_pandas = True
except ModuleNotFoundError:
has_pandas = False
@pytest.mark.skipif(not has_pandas, reason="pandas not installed")
def test_arguments():
"""Test arguments for arguments of test_aide.pandas._check_dfs_passed."""
expected_arguments = ["df_1", "df_2"]
arg_spec = inspect.getfullargspec(ph._check_dfs_passed)
arguments = arg_spec.args
assert len(expected_arguments) == len(
arguments
), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
assert (
expected_arguments == arguments
), f"Incorrect arguments -\n Expected: {expected_arguments}\n Actual: {arguments}"
default_values = arg_spec.defaults
assert (
default_values is None
), f"Unexpected default values -\n Expected: None\n Actual: {default_values}"
@pytest.mark.skipif(not has_pandas, reason="pandas not installed")
def test_exceptions_raised():
"""Test that the expected exceptions are raised by test_aide.pandas._check_dfs_passed."""
with pytest.raises(
TypeError, match=r"expecting first positional arg to be a pd.DataFrame.*"
):
ph._check_dfs_passed(1, pd.DataFrame())
with pytest.raises(
TypeError, match=r"expecting second positional arg to be a pd.DataFrame.*"
):
ph._check_dfs_passed(
|
pd.DataFrame()
|
pandas.DataFrame
|
import click
import os
import numpy as np
import pandas as pd
from scipy import sparse
from panopticon.dna import segmentation_to_copy_ratio_dict
from panopticon.utilities import get_valid_gene_info
# import loompy after the main packages, because sometimes it breaks packages that are imported further:
import loompy
def scrna_wizard_main():
""" """
filepath = click.prompt("Location of .loom file", type=click.Path('wb'))
filename = click.prompt("Name of .loom file")
if not filename.endswith('.loom'):
filename += '.loom'
matrixpath = click.prompt("Data/counts matrix (sparse npz or dense txt)",
type=click.File('rb'))
if matrixpath.name.endswith('.npz'):
matrix = sparse.load_npz(matrixpath)
elif (matrixpath.name.endswith('.csv')) or (matrixpath.name.endswith(
'.tsv')) or (matrixpath.name.endswith('.txt')):
hasheader = click.prompt('Does that file have a header?',
type=click.Choice(['n', 'y']),
default='n')
if (matrixpath.name.endswith('.csv')):
sep = ','
else:
sep = '\t'
if hasheader == 'n':
matrix = pd.read_table(matrixpath, header=None, sep=sep)
elif hasheader == 'y':
matrix =
|
pd.read_table(matrixpath, sep=sep)
|
pandas.read_table
|
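# Editor's note: added, hedged illustration of the header handling used above; the in-memory
# TSV content is invented so the example stays self-contained.
import io
import pandas as pd

raw = "gene\tcell1\tcell2\nTP53\t3\t0\n"
with_header = pd.read_table(io.StringIO(raw), sep='\t')             # first line becomes column names
no_header = pd.read_table(io.StringIO(raw), sep='\t', header=None)  # first line is kept as data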
#This script is to do kinetic classification.
#Make sure that you have setup your PYTHONPATH environment
#variable as described in the github repository.
from zipfile import ZIP_FILECOUNT_LIMIT
from isort import file
from SBMLKinetics import kinetics_classification
import sys
import numpy as np
import os
from sympy import *
from libsbml import * # access functions in SBML
import time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# Column names
SBMLID = "SBMLid"
CLASSIFICATIONS = 'Classifications'
REACTION = 'Reaction'
KINETICLAW = 'kinetic law'
PERCENTAGE = 'Percentage'
class KineticAnalyzer:
"""
Load Dataset of SBML files.
Args:
dataSet: str-"biomodels", "curated", "metabolic", "signalling", "homo_sapiens", "non_homo",
"cellular_organisms", "Mus_musculus", "Mammalia", "Saccharomyces_cerevisiae";
path: str-path to the file, with a format of ``D:\\path\\to``;
model_indices: range-(initial_model_indx, final_model_indx)
"""
def __init__(self, path = os.path.dirname(os.path.abspath(__file__)),
dataSet = "biomodels", model_indices = range(0,1000)):
#In addition to dataSetName, allow users to import a zip of sbml files from a path
initial_model_indx = min(model_indices)
final_model_indx = max(model_indices)
if type(dataSet) == str and dataSet in ["biomodels", "curated",
"metabolic", "signalling", "homo_sapiens", "non_homo",
"cellular_organisms", "Mus_musculus", "Mammalia", "Saccharomyces_cerevisiae"]:
zip_filename = dataSet + '.zip'
try:
self.tuple = kinetics_classification._dataSetStatistics(zip_filename = zip_filename,
initial_model_indx = initial_model_indx, final_model_indx = final_model_indx)
except Exception as err:
raise Exception (err)
elif '.zip' in dataSet:
try:
self.tuple = kinetics_classification._dataSetStatistics(data_dir = path, zip_filename = dataSet,
initial_model_indx = initial_model_indx, final_model_indx = final_model_indx)
except Exception as err:
raise Exception (err)
else:
raise Exception("Not a valid dataset input.")
def getKineticLawDistribution(self, path = "", fileName = ""):
"""
Get the kinetic law distribution (and save the dataframe into an excel file).
Args:
path: str-path to the file, with a format like ``D:/path/to/`` (or ``D:\\\path\\\ to\\\``)
fileName: str-file name to which the excel file is saved; "" (do not save to an excel file).
Returns:
df_gen_stat_final: dataFrame-kinetic law distribution.
The column names are: "Classifications", "Percentage", "Percentage standard error",
"Percentage per model", "Percentage per model standard error".
In the column of "Classifications", there are "ZERO", "UNDR", "UNMO", "BIDR", "BIMO",
"MM", "MMCAT", "HILL", "FR" and "NA" in detail.
"ZERO" means "Zeroth order", "UNDR" means "Uni-directional mass action", "UNMO" means
"Uni-term with moderator", "BIDR" means "Bi-directional mass action", "BIMO" means "Bi-
terms with moderator", "MM" means "Michaelis-Menten kinetics", "MMCAT" means "Michaelis-
Menten kinetics", "HILL" means "Hill equations", "FR" means kinetics in the format of
fraction other than MM, MMCAT and HILL, "NA" means not classified kinetics.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_final = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_final.insert(2, "Percentage standard error", 0)
except:
pass
if fileName != "":
# Create a Pandas Excel writer using XlsxWriter as the engine.
path_fileName = path + fileName
writer = pd.ExcelWriter(path_fileName, engine='xlsxwriter')
df_gen_stat_final.to_excel(writer, sheet_name='general_statistics')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
return df_gen_stat_final
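# Editor's note: added, hedged sketch of the ExcelWriter pattern used above; it assumes the
# optional xlsxwriter engine is installed and uses an invented file name and toy data.
import pandas as pd
_toy = pd.DataFrame({"Classifications": ["ZERO", "UNDR"], "Percentage": [0.6, 0.4]})
with pd.ExcelWriter("kinetic_law_distribution.xlsx", engine="xlsxwriter") as writer:
    _toy.to_excel(writer, sheet_name="general_statistics")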
def TopFrequentKineticLawType(self):
"""
Return the most frequent kinetic law type on average in the loaded SBML dataset.
Returns:
kinetics_type_list: list of kinetics_type.
kinetics_type: str-kinetic law type.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
df_temp = df_gen_stat_plot
# try:
# kinetics_type_list = []
# max_idx = df_temp['Percentage'].idxmax()
# kinetics_type = df_temp['Classifications'][max_idx]
# kinetics_type_list.append(kinetics_type)
# except:
max_value = df_temp['Percentage'].max()
idx_list = df_temp.index[df_temp['Percentage'] == max_value].tolist()
kinetics_type_list =[]
for i in range(len(idx_list)):
kinetics_type_list.append(df_temp.iloc[idx_list[i]]["Classifications"])
return kinetics_type_list
def plotKineticLawDistribution(self, fileName = 'KineticLawDistribution.pdf'):
"""
Plot the kinetic law distribution and save it as a pdf file.
Args:
fileName: str-file name to which the pdf file is saved.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
yerr = df_gen_stat_plot[["Percentage standard error", \
"Percentage per model standard error"]].to_numpy().T
ax = df_gen_stat_plot.plot(kind="bar",x="Classifications", y=["Percentage","Percentage per model"],\
yerr=yerr, fontsize = 8)
ax.set_ylim(0.,1.)
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda y, p: str("{:.2%}".format(y))))
for p in ax.patches:
ax.annotate(str("{:.2%}".format(p.get_height())), (p.get_x() * 1.005, p.get_height() * 1.005), fontsize = 4)
#plt.show()
fig = ax.get_figure()
fig.savefig(fileName)
def getKineticLawDistributionPerMassTransfer(self, rct_num, prd_num, path = "", fileName = ""):
"""
Get the kinetic law distribution for a certain mass transfer
(and save the dataframe into an excel file).
Args:
rct_num: int-0, 1, 2, 3 (representing > 2).
prd_num: int-0, 1, 2, 3 (representing > 2).
path: str-path to the file, with a format like ``D:/path/to/`` (or ``D:\\\path\\\ to\\\``)
fileName: str-file name to which the excel file is saved; "" (do not save to an excel file).
Returns:
df_gen_stat_PR_final: dataFrame-the kinetic law distribution for a certain mass transfer.
The column names are: "Classifications", "Percentage", "Percentage standard error",
"Percentage per model", "Percentage per model standard error".
In the column of "Classifications", there are "ZERO", "UNDR", "UNMO", "BIDR", "BIMO",
"MM", "MMCAT", "HILL", "FR" and "NA" in detail.
"ZERO" means "Zeroth order", "UNDR" means "Uni-directional mass action", "UNMO" means
"Uni-term with moderator", "BIDR" means "Bi-directional mass action", "BIMO" means "Bi-
terms with moderator", "MM" means "Michaelis-Menten kinetics", "MMCAT" means "Michaelis-
Menten kinetics", "HILL" means "Hill equations", "FR" means kinetics in the format of
fraction other than MM, MMCAT and HILL, "NA" means not classified kinetics.
"""
(_, df_gen_stat, _, df_gen_stat_PR, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
try:
df_gen_stat_PR.insert(2, "Percentage standard error", 0)
except:
pass
df_gen_stat_PR_plot = {}
types = len(df_gen_stat_plot)
if prd_num in [0,1,2,3] and rct_num in [0,1,2,3]:
i = prd_num*4 + rct_num
df_gen_stat_PR_plot[i] = pd.DataFrame(columns = df_gen_stat_PR.columns.tolist())
df_temp = df_gen_stat_PR[types*i:types*(i+1)]
df_gen_stat_PR_plot[i] = pd.concat([df_gen_stat_PR_plot[i],df_temp], ignore_index=True)
if fileName != "":
# Create a Pandas Excel writer using XlsxWriter as the engine.
path_fileName = path + fileName
writer = pd.ExcelWriter(path_fileName, engine='xlsxwriter')
df_gen_stat_PR_plot[i].to_excel(writer, sheet_name='general_statistics_PR')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
df_gen_stat_PR_final = df_gen_stat_PR_plot[i]
return df_gen_stat_PR_final
else:
raise Exception("Not a valid reactant or product number.")
def TopFrequentKineticLawTypePerMassTransfer(self, rct_num, prd_num):
"""
Return the most frequent kinetic law type on average for a certain mass transfer
in the loaded SBML dataset.
Args:
rct_num: int-0, 1, 2, 3 (representing > 2).
prd_num: int-0, 1, 2, 3 (representing > 2).
Returns:
kinetics_type_list: list of kinetics_type.
kinetics_type: str-kinetic law type.
"""
(_, df_gen_stat, _, df_gen_stat_PR, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
try:
df_gen_stat_PR.insert(2, "Percentage standard error", 0)
except:
pass
df_gen_stat_PR_plot = {}
types = len(df_gen_stat_plot)
if prd_num in [0,1,2,3] and rct_num in [0,1,2,3]:
i = prd_num*4 + rct_num
df_gen_stat_PR_plot[i] = pd.DataFrame(columns = df_gen_stat_PR.columns.tolist())
df_temp = df_gen_stat_PR[types*i:types*(i+1)]
df_gen_stat_PR_plot[i] = pd.concat([df_gen_stat_PR_plot[i],df_temp], ignore_index=True)
df_temp = df_gen_stat_PR_plot[i]
# try:
# kinetics_type_list = []
# max_idx = df_temp['Percentage'].idxmax()
# kinetics_type = df_temp['Classifications'][max_idx]
# kinetics_type_list.append(kinetics_type)
# except:
max_value = df_temp['Percentage'].max()
idx_list = df_temp.index[df_temp['Percentage'] == max_value].tolist()
kinetics_type_list =[]
for i in range(len(idx_list)):
kinetics_type_list.append(df_temp.iloc[idx_list[i]]["Classifications"])
return kinetics_type_list
else:
raise Exception("Not a valid reactant or product number.")
def plotKineticLawDistributionPerMassTransfer(self, rct_num, prd_num,
fileName = "KineticLawDistributionPerMassTransfer.pdf"):
"""
Plot the kinetic law distribution for the certain mass transfer.
Args:
rct_num: int - 0, 1, 2, 3 (representing > 2)
prd_num: int - 0, 1, 2, 3 (representing > 2)
fileName: str-file name to which the pdf file is saved.
"""
(_, df_gen_stat, _, df_gen_stat_PR, _, \
df_table_PR, df_table_PR_per_model) = self.tuple
#generate the PR two tables
try:
df_table_PR_plot = df_table_PR.div(df_table_PR.sum().sum())
df_table_PR_per_model_plot = df_table_PR_per_model.div(df_table_PR_per_model.sum().sum())
except Exception as e:
raise Exception(e)
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
try:
df_gen_stat_PR.insert(2, "Percentage standard error", 0)
except:
pass
df_gen_stat_PR_plot = {}
types = len(df_gen_stat_plot)
if prd_num in [0,1,2,3] and rct_num in [0,1,2,3]:
i = prd_num*4 + rct_num
df_gen_stat_PR_plot[i] = pd.DataFrame(columns = df_gen_stat_PR.columns.tolist())
df_temp = df_gen_stat_PR[types*i:types*(i+1)]
df_gen_stat_PR_plot[i] = pd.concat([df_gen_stat_PR_plot[i],df_temp], ignore_index=True)
yerr = df_gen_stat_PR_plot[i][["Percentage standard error", \
"Percentage per model standard error"]].to_numpy().T
ax = df_gen_stat_PR_plot[i].plot(kind="bar",x="Classifications", y=["Percentage","Percentage per model"],\
yerr=yerr, legend = None, fontsize = 8)
ax.set_ylim(0.,1.)
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda y, p: str("{:.2%}".format(y))))
for p in ax.patches:
ax.annotate(str("{:.2%}".format(p.get_height())), (p.get_x() * 1.005, p.get_height() * 1.005), fontsize = 4)
#plt.show()
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda y, p: str("{:.2%}".format(y))))
ax.annotate('%s'%"{:.2%}".format(df_table_PR_plot.iat[i//4, i%4]), xy=(0, .9), color = 'dodgerblue')
if i//4 == 3 and i % 4 != 3:
ax.annotate('P > %d, R = %d'%(2, i%4), xy=(3, .9))
elif i//4 != 3 and i % 4 == 3:
ax.annotate('P = %d, R > %d'%(i//4, 2), xy=(3, .9))
elif i//4 == 3 and i % 4 == 3:
ax.annotate('P > %d, R > %d'%(2, 2), xy=(3, .9))
else:
ax.annotate('P = %d, R = %d'%(i//4, i%4), xy=(3, .9))
ax.annotate('%s'%"{:.2%}".format(df_table_PR_per_model_plot.iat[i//4, i%4]), xy=(7., .9), color = 'darkorange')
fig = ax.get_figure()
handles, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center')
fig.savefig(fileName)
else:
raise Exception("Not a valid reactant or product number.")
def plotKineticLawDistributionVsMassTransfer(self, fileName = 'KineticLawDistributionVsMassTransfer.pdf'):
"""
Plot the kinetic law distribution vs each type of mass transfer.
Args:
fileName: str-file name to which the pdf file is saved.
"""
(_, df_gen_stat, _, df_gen_stat_PR, _, \
df_table_PR, df_table_PR_per_model) = self.tuple
#generate the PR two tables
try:
df_table_PR_plot = df_table_PR.div(df_table_PR.sum().sum())
df_table_PR_per_model_plot = df_table_PR_per_model.div(df_table_PR_per_model.sum().sum())
except Exception as e:
raise Exception(e)
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
try:
df_gen_stat_PR.insert(2, "Percentage standard error", 0)
except:
pass
df_gen_stat_PR_plot = {}
types = len(df_gen_stat_plot)
fig = plt.figure(figsize = (16,16))
axes = fig.subplots(nrows=4, ncols=4)
for i in range(16):
df_gen_stat_PR_plot[i] = pd.DataFrame(columns = df_gen_stat_PR.columns.tolist())
df_temp = df_gen_stat_PR[types*i:types*(i+1)]
df_gen_stat_PR_plot[i] =
|
pd.concat([df_gen_stat_PR_plot[i],df_temp], ignore_index=True)
|
pandas.concat
|
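# Editor's note: added, hedged illustration of the block-slicing pattern used above, where a long
# table stacks one block of `types` rows per (product, reactant) combination; all values invented.
import pandas as pd

types = 2                                  # rows per block in this toy example
stacked = pd.DataFrame({
    "Classifications": ["ZERO", "UNDR"] * 3,
    "Percentage": [0.5, 0.5, 0.4, 0.6, 0.7, 0.3],
})
i = 1                                      # block index, e.g. prd_num * 4 + rct_num
block = pd.DataFrame(columns=stacked.columns.tolist())
block = pd.concat([block, stacked[types * i:types * (i + 1)]], ignore_index=True)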
"""
tests class collects all the methods that are test specific
they need to be single thread and MP compatible
test structure is always:
- self.initialize_test()
- self.prepare_test()
- res = self.run_test()
- self.collect_res(res)
- self.save_full_output()
- self.save_ind_output()
- self.save_output()
- self.write_report()
- self.end_test()
For non MP use, there are helper functions that take care of iterating over the items:
- self.initialize_test()
- self.prepare_test()
- res = self.run_test_helper()
- self.collect_res(res)
- self.save_full_output_helper()
- self.save_ind_output_helper()
- self.save_output()
- self.write_report()
- self.end_test()
todo:
- check all run with tracking for appropriate reload setting
Version 0.3
Update 30.07.18/sk
"""
import os
import pandas as pd
import numpy as np
import numpy.random as rand
from tb.basetest import Test
from tb.tb_backend.run import Run
from tb.tb_backend.savingpipe import Plass
import re
import scipy.optimize as opt
import itertools
from ast import literal_eval
from tb.tb_backend.report import Report
import pickle
from configparser import ConfigParser
class Sensitivity(Test):
"""
usage:
from tb.tests import Sensitivity
from tb.tb_backend.savingpipe import Plass
folder = r'C:\code\testingbattery\FOLDER'
test = Sensitivity(folder,'MODELNAME.py',0.1)
test.initialize_test()
test.prepare_test()
res = test.run_test_helper()
test.collect_res(res)
# add the saving pipe stuff
"""
def __init__(self, folder, file_name, sensitivity_percentage):
super(Sensitivity, self).__init__(folder, file_name, 'sensitivity')
self.err_list = []
self.MP = True
self.sp = sensitivity_percentage
self.class_name = 'Sensitivity'
# this needs to be integrated into the test definition in the battery and the builder
self.equimode = False
self.cf = ConfigParser()
# config folder doesn't change, so it's put here so it's on one line
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'tb_config.ini'))
# this should go to saving pipe
self.nmb_heatmaps = self.cf['saving pipe settings'].getint('nmb_heatmaps', fallback=4)
def set_equimode(self, equimode=False):
"""
Deprecated 27.07.18/sk
Not in use 27.07.18/sk
:param equimode:
:return:
"""
# this really needs to be tested
# this should not be necessary anymore 02.07.18/sk
self.equimode = equimode
def set_base_params(self):
"""
Setting of base parameters for the base run
:return:
"""
if self.equimode:
self.base_params = self.const['equi']
else:
self.base_params = self.const['value']
self.base_builtin = self.builtin['value']
def prepare_test(self):
"""
Prepares the sensitivity runs and adds them to the run list
:return:
"""
# creates one run for positive and one for negative sensitivity
sp_lst = [self.sp * 1, self.sp * -1]
for sp in sp_lst:
# positive and negative sensitivity get a full df each
self.model.create_full_df(self.base.run, sp)
for i, row in self.const.iterrows():
name = '%s_%s' % (row['Real Name'], sp)
w_params = self.base_params.copy()
w_params.iloc[i] *= (1 + sp)
# Run has inputs name,full_ID,exo_names=None,params=None,return_columns=None
self.run_lst.append(Run(name, sp, self.exo_names, w_params, self.endo_names,
'%s=%s' % (row['Real Name'], w_params.iloc[i])))
def collect_res(self, res):
"""
collects the results from the test execution and prepares them for further use
sens has additional sensitivity calcs for each run
:param res: result list from run execution
"""
for i, run in enumerate(self.run_lst):
err_lst = res[i][2]
if not res[i][1].empty:
# this should be eliminated in a revision, results should come in as a list of run objects 250718/sk
run.run = res[i][1].astype('float64', copy=False)
# run.chk_run() tests if there are np.nan in the first line,
# which means the run couldn't be executed properly and shouldn't be added
# those runs should technically not even show up (some do, some don't)
# topic to discuss with PySD 180722/sk
if run.chk_run():
self.model.add_run(run.run, run.name, run.full_id)
run.treat_run(self.base.run)
else:
# we remove negative stock and flow errors here because those runs are not supposed
# to have run in the first place
# negative stock and flow errors in this case arise from np.inf in some variables
# caused by division by 0
# while they technically should be fine, it's just confusing for anyone to have an error
# other than the division by 0
err_lst = [x for x in res[i][2] if x[1] not in ['Negative Flow', 'Negative Stock']]
# print is just for testing
print(i)
self.model.err_lst.extend(err_lst)
# opening pipes all over the place might be not the best idea, one pipe for all saving might be better
pipe = Plass(self)
for key, full_df in self.model.full_df_dict.items():
pipe.save_csv(full_df, 'full_df', key)
def save_ind_output_mp(self, run):
"""
:param run:
"""
pipe = Plass(self)
pipe.create_plot(run.run, 'run', run.name)
pipe.create_plot(run.norm, 'norm', run.name)
pipe.create_plot(run.sens, 'exo_sens', run.name)
if self.testing_mode:
pipe.save_csv(run.run, 'run', run.name)
pipe.save_csv(run.norm, 'norm', run.name)
pipe.save_csv(run.sens, 'exo_sens', run.name)
def save_full_output_mp(self, key):
"""
Overwrite because for sens we need endo run and endo sens graphs and models
:param key: key for the full_df
:return:
"""
full_df = pd.read_csv(os.path.join(self.folder_dict[self.test], 'full_df_%s.csv' % key), index_col=0,
header=[0, 1], dtype=np.float64)
self.iterate_endo_plots(key, full_df)
pipe = Plass(self)
# this shouldn't be necessary anymore 26.07.18/sk
# full_df = full_df.astype(float)
pipe.create_heatmap(key, full_df, self.nmb_heatmaps)
if self.testing_mode:
try:
pipe.create_anim_heatmap(key, full_df)
# define the exception thrown here
except:
pass
# probably need two iterations, one for endo sens and one for endo run, exos are not handled in a model
pipe.create_model(key, full_df, 'endo_run')
pipe.create_model(key, full_df, 'endo_sens')
# this should not be necessary anymore 30.07.18/sk
#if self.full_df_output:
# pipe.save_csv(full_df, 'full_df', key)
def save_endo_plots(self, endo_run, unit, name):
"""
:param endo_run:
:param unit:
:param name:
"""
# type name now includes the prefix, if necessary
pipe = Plass(self)
type_name = 'endo_run'
pipe.create_sens_plot(endo_run, unit, name, type_name)
if self.testing_mode:
pipe.save_csv(endo_run, type_name, name)
# this transpose shouldn't be necessary, but division by first column doesn't seem to work
endo_run = endo_run.transpose()
endo_sens = (endo_run - endo_run.iloc[0]) / endo_run.iloc[0]
endo_sens = endo_sens.transpose()
type_name = 'endo_sens'
pipe.create_sens_plot(endo_sens, unit, name, type_name)
if self.testing_mode:
pipe.save_csv(endo_sens, type_name, name)
def write_report(self):
"""
Writing the report, inputs come from pickle files
For sensitivity we need the intervals pickle (for the heatmaps) as well as the endo_its and exo_its
for multiple graphs of the same variable
"""
rep = Report(self.folder, self.file)
const_lst = self.const['Real Name'].tolist()
# we have to pickle this because with MP, the passing of arguments is faulty
f_path = os.path.join(self.folder_dict[self.test], 'intervals.pickle')
pickle_in = open(f_path, 'rb')
intervals = pickle.load(pickle_in)
pickle_in.close()
os.remove(f_path)
# endo its are the iterations for endogenous graphs
f_path = os.path.join(self.folder_dict[self.test], 'endo_its.pickle')
pickle_in = open(f_path, 'rb')
endo_its = pickle.load(pickle_in)
pickle_in.close()
os.remove(f_path)
# exo its are the iterations for exogenous graphs
f_path = os.path.join(self.folder_dict[self.test], 'exo_its.pickle')
pickle_in = open(f_path, 'rb')
exo_its = pickle.load(pickle_in)
pickle_in.close()
os.remove(f_path)
# report tuple includes section title, constant list, sensitivity percentage, intervals for the heatmap,
# exogenous and endogenous iterations, link to test source folder
rep_tpl = (self.class_name, const_lst, self.sp, intervals, exo_its, endo_its,
self.folder_dict[self.test].replace(self.folder_dict['source'], '').lstrip('\\'))
rep.write_sens(rep_tpl)
rep.save_report()
class MonteCarlo(Test):
"""
Monte Carlo is a subclass of test and runs the MC testing
"""
def __init__(self, folder, file_name, sensitivity_percentage, runs):
super(MonteCarlo, self).__init__(folder, file_name, 'montecarlo')
self.err_list = []
self.MP = True
self.sp = sensitivity_percentage
self.nmb_runs = runs
self.class_name = 'MonteCarlo'
def prepare_test(self):
"""
Prepares the runs and adds them to the run list
Creates self.nmb_runs uniformly distributed random runs for each parameter
"""
for i, row in self.const.iterrows():
self.model.create_full_df(self.base.run, row['Real Name'])
if self.base_params.iloc[i] != 0:
input_set = rand.uniform((1 - self.sp) * self.base_params.iloc[i],
(1 + self.sp) * self.base_params.iloc[i],
self.nmb_runs)
else:
input_set = np.full(1, 0)
for j in np.nditer(input_set):
name = '%s_%s' % (row['Real Name'], j)
w_params = self.base_params.copy()
w_params.iloc[i] = j
# Run has inputs name,full_ID,exo_names=None,params=None,return_columns=None
self.run_lst.append(Run(name, row['Real Name'], self.exo_names, w_params, self.endo_names,
'%s=%s' % (row['Real Name'], w_params.iloc[i]), reload=True))
w_params = self.base_params.copy()
w_params.iloc[i] *= (1 - self.sp)
self.run_lst.append(Run('floor', row['Real Name'], self.exo_names, w_params, self.endo_names,
'%s=%s' % (row['Real Name'], w_params.iloc[i]), reload=True))
w_params = self.base_params.copy()
w_params.iloc[i] *= (1 + self.sp)
self.run_lst.append(Run('ceiling', row['Real Name'], self.exo_names, w_params, self.endo_names,
'%s=%s' % (row['Real Name'], w_params.iloc[i]), reload=True))
def save_full_output_mp(self, key):
"""
:param key:
"""
full_df = pd.read_csv(os.path.join(self.folder_dict[self.test], 'full_df_%s.csv' % key), index_col=0,
header=[0, 1], dtype=np.float64)
pipe = Plass(self)
full_df = full_df.astype(float)
self.iterate_endo_plots(key, full_df)
if self.full_df_output:
pipe.save_csv(full_df, 'full_df', key)
pipe.create_model(key, full_df, self.test)
def save_endo_plots(self, endo_run, unit, name):
"""
:param endo_run:
:param unit:
:param name:
"""
pipe = Plass(self)
# type name now includes the prefix, if necessary
type_name = self.test
pipe.create_mc_plot(endo_run, unit, name, type_name)
def write_report(self):
"""
Writes the report for the MC test
doesn't need any pickled information
"""
rep = Report(self.folder, self.file)
const_lst = self.const['Real Name'].tolist()
# report tuple includes section title, constant list, MC percentage, link to test source
rep_tpl = (self.class_name, const_lst, self.sp,
self.folder_dict[self.test].replace(self.folder_dict['source'], '').lstrip('\\'))
rep.write_mc(rep_tpl)
rep.save_report()
class Equilibrium(Test):
"""
Saving of plots is generic (from Test class)
"""
def __init__(self, folder, file_name, equi_method, increment_percentage, incremental=True):
super(Equilibrium, self).__init__(folder, file_name, 'equilibrium')
self.err_list = []
self.MP = False
self.sp = increment_percentage
self.set_inc = incremental
self.equi_method = equi_method
self.class_name = 'Equilibrium'
# sum df is summarizing the equi conditions found
self.sum_df = None
self.equi_set = {}
self.equi_excl = []
self.cf = ConfigParser()
# config folder doesn't change, so it's put here so it's on one line
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'tb_config.ini'))
self.equi_precision = self.cf['test parameters'].getfloat('equi_precision', fallback=0.01)
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'settings.ini'))
self.equi_res = self.cf['tests'].getfloat('equi_res', fallback=0.1)
self.equi_iter = self.cf['tests'].getfloat('equi_iter', fallback=0)
self.equi_maxiter = self.cf['tests'].getint('equi_maxiter', fallback=20)
def initialize_test(self, equimode=False):
"""
:param equimode:
"""
self.initialize_base()
self.read_equi_file()
self.op_flows()
def read_equi_file(self):
"""
Equi file is the file where the user inputs concerning the equilibrium test are stored
"""
equi_file = '%s_equi.csv' % self.out_name
equi_doc = pd.read_csv(os.path.join(self.folder_dict['doc'], equi_file), index_col=0)
for i, row in equi_doc.iterrows():
self.equi_set[row['Py Name']] = (row['fix value'], row['global minimum'], row['global maximum'])
# if the value is fixed, its name is added to the excluded list
if not np.isnan(row['fix value']):
self.equi_excl.append(row['Py Name'])
# equilibrium function
def equilibrium(self, param_lst):
"""
:param param_lst:
:return:
"""
name = ''
run = Run(name, self.test, self.exo_names, param_lst, self.endo_names)
args = run, self.flow_names, self.stock_names, self.test_name
_, res, errors = self.model.run_with_tracking(args)
equi = self.calc_equi(res)
# runtime errors are tracked in the model class
self.model.err_lst.extend(errors)
return equi
def collect_equi(self, name, equi_df, ts, index_lst):
"""
recursively groups all equilibria conditions for the stocks and the model
:param name: name of source, equi or base
:param equi_df: dataframe of the run
:param ts: timestep of the model
:param index_lst: list with indices where there is an equilibrium condition
:return:
"""
cut_off = None
ending_ts = None
# while there are time steps in the index list, we continue
if index_lst:
initial_ts = index_lst[0]
# if the length of the list is just 1 element, we have to capture that otherwise we get a max recursion
# depth error
if len(index_lst) > 1:
# here we search forward until we find a time step that is after a gap
for i, index in enumerate(index_lst):
if i > 0:
if index_lst[i] - index_lst[i - 1] != ts:
ending_ts = index_lst[i - 1]
cut_off = i
break
if ending_ts is None:
ending_ts = index_lst[-1]
index_lst = []
else:
ending_ts = initial_ts
index_lst = []
# here we prepare the next iteration of the index list, if it's empty, it will stay empty
index_lst = index_lst[cut_off:]
st_lst = equi_df[self.stock_names].loc[initial_ts].tolist()
sum_dict = {'name': name, 'start': initial_ts, 'end': ending_ts}
for i, value in enumerate(self.stock_names):
sum_dict[value] = st_lst[i]
self.sum_df = self.sum_df.append(sum_dict, ignore_index=True)
return self.collect_equi(name, equi_df, ts, index_lst)
else:
return
def src_equi(self, run, name):
"""
:param run: dataframe to search equilibrium conditions in
:param name: name of the run
"""
# we start off by adding the stocks to the equi_df because we need them for the initial conditions
equi_df = run[self.stock_names]
# equi_df = pd.concat([equi_df,run[self.flow_names]],axis=1)
# iterates through the first level of the list with the flow expressions
# tot res will be a pd.Series
tot_res = 0
for i, expr in enumerate(self.flow_expr_lst):
st_res = 0
st_name = self.stock_names[i]
# iterates through the different elements in the flow expressions
for j, el in enumerate(expr):
if el not in ['+', '-', '']:
if expr[j - 1] == '-':
st_res -= run[el]
else:
st_res += run[el]
st_res.name = 'sum_%s' % st_name
# the threshold for equilibria is set at 0.01
st_res[st_res.abs() < self.equi_precision] = 0
equi_df = pd.concat([equi_df, st_res], axis=1)
tot_res += st_res.abs()
tot_res.name = 'model'
equi_df = pd.concat([equi_df, tot_res], axis=1)
self.save_csv('equi_df_%s' % name, equi_df, self.test)
index_lst = equi_df.loc[equi_df['model'] == 0].index.tolist()
ts = self.builtin['value'][2]
self.collect_equi(name, equi_df, ts, index_lst)
if name == 'base':
# this creates the df for the time line for the report
self.base_equi_df = equi_df
self.base_equi_df.drop(self.stock_names, axis=1, inplace=True)
self.base_equi_df[self.base_equi_df != 0] = np.nan
self.base_equi_df[self.base_equi_df == 0] = 1
def calc_equi(self, res):
"""
calculates the equilibrium result for initialization
first calculates the sum of the flows for each stock
then calculates the sum of absolute sums
this sum needs to be 0 for an equilibrium to exist
:param res: OptimizeResult object from scipy.optimize.minimize
:return: sum of absolute sums
"""
tot_res = 0
# iterates through level 1 of list of lists
for expr in self.flow_expr_lst:
st_res = 0
# iterates through level 2 of the list of lists
for i, el in enumerate(expr):
# empty string needs to be in selection because if the first flow is negative, it will add an
# empty string element to the expr (which is a list of strings)
if el not in ['+', '-', '']:
out = res[el]
if expr[i - 1] == '-':
out = -out
# calculates the stock result
st_res += out
tot_res += sum(abs(st_res))
return tot_res
def op_flows(self):
"""
extracts the flows for the equilibrium calculation
flow expressions are stored with the associated stocks in the stocks dataframe,
thus having operations and names in the expression
:return: list of lists with the flow expressions split up
"""
self.flow_expr_lst = []
for i, row in self.stocks.iterrows():
flow_expr = row['flow expr']
# split on + and - (only operations allowed in stocks) and keep the operator in the list
flow_expr = re.split(r'([+-])', flow_expr)
# strip all expressions to make sure that there are no errors due to spaces still in the thing
flow_expr = [s.strip() for s in flow_expr]
self.flow_expr_lst.append(flow_expr)
def create_init_bounds(self):
"""
# this has to go to equilibrium test
incremental=True indicates that equilibria closer to the base run are searched,
is more time intensive than incremental = false
creates the initial bounds for the equilibrium function
even with incremental = False equilibria can still found incrementally as even with very large max_equi bounds,
there is the possibility that incrementally the bounds are increased, but it's unlikelier
:return: list of tuples with the bounds for each exogenous variable in the model
"""
self.bound_lst = []
for i, name in enumerate(self.exo_names):
if name in self.equi_excl:
self.base_params.iloc[i] = self.equi_set[name][0]
if self.set_inc:
for i, value in self.base_params.iteritems():
# if values are 0 at t0 they need to be manually set to an arbitrary bounds, otherwise they won't change
# not sure how to set them effectively
if self.exo_names[i] in self.equi_excl:
self.bound_lst.append((self.equi_set[self.exo_names[i]][0], self.equi_set[self.exo_names[i]][0]))
else:
if value == 0:
self.bound_lst.append((0, 1))
else:
bounds = (value * (1 - self.sp), value * (1 + self.sp))
self.bound_lst.append(bounds)
def build_bounds(self):
"""
# this has to go to equilibrium test
updates the bounds for each iteration of the solver
method one increases the bounds based on the initial parameter value from the base run
method two increases the bounds based on the result of the equilibrium function
:return: updated bounds list, parameters for next iteration
"""
if self.equi_method == 1:
for i, var in enumerate(self.res_eq.x):
if self.exo_names[i] not in self.equi_excl:
lb, ub = self.bound_lst[i]
# again we need to check if the initial is 0, then changed it to the result for bounds calculation
if self.base_params.loc[i] == 0:
# if initial parameter is zero, parameter is handled as if method 2
# even though method 1 is selected
# except that the applied space here is dependent on iter_cnt
value = var
else:
value = self.base_params.loc[i]
if lb == var:
lb = value * (1 - self.iter_cnt * self.sp)
elif ub == var:
ub = value * (1 + self.iter_cnt * self.sp)
if lb < self.equi_set[self.exo_names[i]][1]:
lb = self.equi_set[self.exo_names[i]][1]
if ub > self.equi_set[self.exo_names[i]][2]:
ub = self.equi_set[self.exo_names[i]][2]
self.bound_lst[i] = (lb, ub)
self.equi_params = self.res_eq.x
elif self.equi_method == 2:
for i, var in enumerate(self.res_eq.x):
if self.exo_names[i] not in self.equi_excl:
lb = var * (1 - self.sp)
ub = var * (1 + self.sp)
self.bound_lst[i] = (lb, ub)
self.equi_params = self.res_eq.x
else:
pass
def write_equi_to_doc(self, equi_dict):
# this has to go to the equilibrium test
"""
saves the equilibrium result to the doc file
the equi_dict used here has all exogenous variables and for each either a number value, NE (No Equilibrium),
or BE (Bad Equilibrium)
:param equi_dict: dictionary from the equilibrium test output, used to create the equi runs
:return: saved .csv
"""
for key, val in equi_dict.items():
self.doc.loc[self.doc['Py Name'] == key, 'equi'] = val
return self.save_csv('%s_doc' % self.out_name, self.doc, 'doc')
def create_run_with_result(self, result):
"""
creates a run with the results of some function, does not need to pass exo names because exo names are
global in here
:param result: list or series with parameter settings
:return: df with the resulting run (endogenous variables)
"""
run = Run('res_eq', 'equi', self.exo_names, result, self.endo_names)
res = self.model.run(params=run.input_dict, return_columns=run.return_columns)
run.run = res
return run
def check_equilibrium(self):
"""
# this needs to go to equilibrium test
this function checks the result of the equilibrium function and adjusts it if not all conditions for
a good equilibrium are met
if the sum for the equilibrium is 0, but the sum of all flows is 0, then an equilibrium was found,
but it is just by setting all parameters to 0, thus making it impossible to use for other tests,
thus the values are changed to BE, bad equilibrium
if the result of the equilibrium function is larger than 0.1, then no equilibrium could be found, thus changing
the values to NE, no equilibrium
it is possible that no equilibrium is found because the while loop of the equilibrium function exits due to
improvement being 0 even though an equilibrium might be possible, but I don't know how to fix that
:return: the updated dictionary with equi values (or NE, BE)
"""
equi_dict = dict(zip(self.exo_names, self.res_eq.x))
self.eq_res = 'GE'
if self.eq_run.run[self.flow_names].iloc[0].sum(axis=0) == 0:
for key, val in equi_dict.items():
equi_dict[key] = 'BE'
self.eq_res = 'BE'
if self.res_eq.fun > 0.1:
for key, val in equi_dict.items():
equi_dict[key] = 'NE'
self.eq_res = 'NE'
return equi_dict
def prepare_test(self):
"""
For the equilibrium test there is no need for a run list as they are not passed through MP
"""
if self.set_inc:
self.create_init_bounds()
self.res_lst = []
self.equi_params = self.base_params
self.iter_cnt = 1
def run_test(self):
"""
run test is the alternative for run with MP and collect res
Equilibrium is currently the only test using it
"""
# first optimizer run is executed to establish a starting point
# if not incremental, no bounds are necessary
if self.set_inc:
self.res_eq = opt.minimize(self.equilibrium, self.equi_params, bounds=self.bound_lst)
else:
self.res_eq = opt.minimize(self.equilibrium, self.equi_params)
# results are gathered to document the initial search
self.eq_run = self.create_run_with_result(self.res_eq.x)
self.eq_run.calc_fit(self.base.run)
self.res_lst.append((self.res_eq.fun, self.eq_run.fit, self.res_eq.x))
# self improv is set to 1 to make sure it continues
self.improv = 1
while self.res_eq.fun > self.equi_res and self.improv > self.equi_iter:
self.iter_cnt += 1
# just a bit of reporting that things aren't hanging
print('start', self.iter_cnt)
if self.set_inc:
# updating the bounds
self.build_bounds()
self.res_eq = opt.minimize(self.equilibrium, self.equi_params, bounds=self.bound_lst)
else:
self.res_eq = opt.minimize(self.equilibrium, self.equi_params)
# gathering the results again
self.eq_run = self.create_run_with_result(self.res_eq.x)
self.eq_run.calc_fit(self.base.run)
self.res_lst.append((self.res_eq.fun, self.eq_run.fit, self.res_eq.x))
# calculates the difference between the last two iterations to set to equilibrium,
# is -2 and -1 because the index in the list is one behind the count
self.improv = self.res_lst[self.iter_cnt - 2][0] - self.res_lst[self.iter_cnt - 1][0]
# if equilibrium is not found after 20 iterations, we should move on
if self.iter_cnt == self.equi_maxiter:
break
self.model.create_full_df(self.base.run, self.test)
self.model.add_run(self.eq_run.run, 'equilibrium run', self.test)
# creating the full df to avoid issues with large dfs in MP (which is not the case here)
pipe = Plass(self)
for key, full_df in self.model.full_df_dict.items():
pipe.save_csv(full_df, 'full_df', key)
def save_output(self):
"""
Saving the output from the equilibrium test
"""
# this is all the output that doesn't go through MP
self.save_lst_csv(self.res_lst, 'equi_sum_%s' % self.out_name, 'equi',
columns=['equilibrium result', 'error to base', 'parameters'], append=False)
# this is for the search of equilibrium conditions in the base and equi run
self.sum_df = pd.DataFrame(columns=['name', 'start', 'end'].extend(self.stock_names))
self.src_equi(self.base.run, 'base')
self.src_equi(self.eq_run.run, 'equi')
# sum df could be empty if no equilibrium condition has been found
if not self.sum_df.empty:
order = ['name', 'start', 'end']
order.extend(self.stock_names)
self.sum_df = self.sum_df[order]
self.sum_df.to_csv(os.path.join(self.folder_dict[self.test], 'equi_sum.csv'))
exo_r_dict = self.check_equilibrium()
# testing feature to compare the found equilibria between models
equi_rep = [[self.res_eq.fun, self.eq_run.fit, self.res_eq.x, self.iter_cnt]]
equi_db = pd.DataFrame(equi_rep)
with open(os.path.join(self.folder, 'equidoc.csv'), 'a') as f:
equi_db.to_csv(f, header=False)
self.write_equi_to_doc(exo_r_dict)
pipe = Plass(self)
# since equi is not going through MP, the model creation is called here a bit differently
pipe.create_model('equi', self.model.full_df_dict['equi'], self.test)
pipe.create_timeline(self.base_equi_df, 'equi_base')
def write_report(self):
"""
writing the report for the equilibrium test
"""
rep = Report(self.folder, self.file)
# we don't need the its here, but we need to get rid of the pickle file
f_path = os.path.join(self.folder_dict[self.test], 'endo_its.pickle')
os.remove(f_path)
equi_doc = self.doc.loc[self.doc['equi'].notnull()]
# report tuple includes section title, equilibrium result, equilibrium settings,
# list with equilibrium conditions, link to test source
rep_tpl = (self.class_name, self.eq_res, equi_doc, self.sum_df,
self.folder_dict[self.test].replace(self.folder_dict['source'], '').lstrip('\\'))
rep.write_equi(rep_tpl)
rep.save_report()
class TimeStep(Test):
"""
Timestep test for the testing battery
"""
def __init__(self, folder, file_name):
super(TimeStep, self).__init__(folder, file_name, 'timestep')
self.err_list = []
self.MP = True
self.cf = ConfigParser()
# config folder doesn't change, so it's put here so it's on one line
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'settings.ini'))
self.start_ts = self.cf['tests'].getfloat('ts_start', fallback=1)
self.step_ts = self.cf['tests'].getfloat('ts_iter', fallback=0.5)
self.step_cnt = self.cf['tests'].getint('ts_maxiter', fallback=10)
self.ts_threshold = self.cf['tests'].getfloat('ts_threshold', fallback=0.015)
self.class_name = 'TimeStep'
def prepare_test(self):
"""
prepares the runs for this test
"""
rts = np.arange(self.base_builtin.iloc[1], self.base_builtin.iloc[0] + 1, 1)
base_full = self.model.run(return_timestamps=rts, reload=True)
col_lst = list(base_full)
for col in col_lst:
if base_full[col].all() == 0:
base_full[col] = np.nan
# endos that are always zero could be added to the report at some point 17.07.18/sk
self.base.add_run(base_full[self.endo_names])
self.model.create_full_df(self.base.run, 'timestep')
for i in range(self.step_cnt):
ts = self.start_ts * self.step_ts ** i
name = 'timestep_%s' % ts
# Run has inputs name,full_ID,exo_names=None,params=None,return_columns=None
self.run_lst.append(Run(name, 'timestep', [self.builtin_names.iloc[-1]], [ts], self.endo_names,
'TimeStep=%s' % ts, rts, reload=True))
def save_output(self):
"""
saving the output for the time step test
"""
# this is all the output that doesn't go through MP
res_lst = []
# tracklist is just for testing purposes
trck_lst = []
comp_df = self.model.full_df_dict['timestep']
comp_df = comp_df.loc(axis=1)[:, self.stock_names]
base_name = 'base_%s' % self.base_builtin.iloc[-1]
res_lst.append((base_name, 1))
for i in range(1, self.step_cnt):
ts = self.start_ts * self.step_ts ** i
sm_name = 'timestep_%s' % ts
lg_name = 'timestep_%s' % (ts * 2)
sens_df = comp_df.loc(axis=1)[[sm_name, lg_name], :]
sens_df = sens_df.copy()
# dropna should be deleted 17.07.18/sk
# sens_df.dropna(inplace=True)
if (sens_df.isnull().sum(axis=1) == 0).all():
# absolute value is taken because we only care about the distance to the upper run
sens_df = abs(
(sens_df.loc(axis=1)[sm_name] - sens_df.loc(axis=1)[lg_name]) / sens_df.loc(axis=1)[lg_name])
est = sens_df.mean(axis=0).mean(axis=0)
else:
est = 1
res_lst.append((lg_name, est))
for i, step in enumerate(res_lst[1:]):
name, est = step
if est <= self.ts_threshold:
ts = name.split('_')[-1]
trck_lst.append((self.out_name, self.base_builtin.iloc[-1], ts, est))
self.ts_rep = (self.out_name, self.base_builtin.iloc[-1], ts, est)
self.save_lst_csv(trck_lst, 'ts_tracking', 'source',
['Model Name', 'Actual TS', 'Optimal TS', 'Opt Result'], append=True)
break
# the last element is i=8 because we don't use the first time step for iteration
elif i == 8:
# if it doesn't find the optimal timestep, we report a 'NF' for not found
trck_lst.append((self.out_name, self.base_builtin.iloc[-1], 'NF', est))
self.ts_rep = (self.out_name, self.base_builtin.iloc[-1], 'NF', est)
self.save_lst_csv(trck_lst, 'ts_tracking', 'source',
['Model Name', 'Actual TS', 'Optimal TS', 'Opt Result'], append=True)
break
self.save_lst_csv(res_lst, 'result', self.test, ['Timestep', 'Result'], append=False)
def write_report(self):
"""
write the report for the time step test
"""
rep = Report(self.folder, self.file)
# we have to pickle this because with MP, the passing of arguments is faulty
# the endo_its is not needed here, but still needs to be removed
f_path = os.path.join(self.folder_dict[self.test], 'endo_its.pickle')
os.remove(f_path)
rep_tpl = (
self.class_name, self.ts_rep,
self.folder_dict[self.test].replace(self.folder_dict['source'], '').lstrip('\\'))
rep.write_tstep(rep_tpl)
rep.save_report()
class Switches(Test):
"""
testing the different switch settings in all the combinations
"""
def __init__(self, folder, file_name):
super(Switches, self).__init__(folder, file_name, 'switches')
self.err_list = []
self.MP = True
self.class_name = 'Switches'
self.condensed = False
def create_switch_settings(self):
"""
# this needs to go to the switches test
creates the df with switch settings
condensed only returns the switch settings where all are turned on or turned off
:return:
"""
self.switch_lst = []
for i, row in self.switches.iterrows():
self.switch_lst.append(row['Py Name'])
self.nmb_switch = len(self.switch_lst)
if self.nmb_switch > 0:
set_switch = [np.reshape(np.array(i), (1, self.nmb_switch)) for i in
itertools.product([0, 1], repeat=self.nmb_switch)]
self.switch_df = pd.DataFrame(data=np.reshape(set_switch, (2 ** self.nmb_switch, self.nmb_switch)),
columns=self.switch_lst)
if self.condensed:
self.switch_df = self.switch_df.loc[self.switch_df.sum(axis=1).isin([0, self.nmb_switch])]
else:
self.switch_df = pd.DataFrame()
self.save_csv('switch_settings', self.switch_df, self.test)
def prepare_test(self):
"""
prepare the switches test
"""
self.create_switch_settings()
self.model.create_full_df(self.base.run, 'full')
self.model.create_full_df(self.base.run, 'sum')
for i, row in self.switch_df.iterrows():
name = 'switch_run_%s' % i
self.run_lst.append(Run(name, 'full', row.index, row.values, self.endo_names))
if row.sum() == 1:
self.run_lst.append(Run(name, 'sum', row.index, row.values, self.endo_names))
# maybe the endo plots don't need to be quite so numerous here... maybe just the stocks
def write_report(self):
"""
write the report for the switches test
"""
rep = Report(self.folder, self.file)
# we have to pickle this because with MP, the passing of arguments is faulty
f_path = os.path.join(self.folder_dict[self.test], 'endo_its.pickle')
pickle_in = open(f_path, 'rb')
endo_its = pickle.load(pickle_in)
pickle_in.close()
os.remove(f_path)
rep_tpl = (self.class_name, self.switch_df, endo_its,
self.folder_dict[self.test].replace(self.folder_dict['source'], '').lstrip('\\'))
rep.write_swit(rep_tpl)
rep.save_report()
class Distance(Test):
"""
the distance test of the tb
currently somewhat faulty and only available in testing mode
also has no setting in the config file
"""
def __init__(self, folder, file_name):
super(Distance, self).__init__(folder, file_name, 'distance')
self.err_list = []
self.MP = False
self.class_name = 'Distance'
# needs to be verified
# need all functions that contain a stock
self.stocklike_functions = ['DELAY1', 'DELAY1I', 'DELAY3', 'DELAY3I', 'DELAY N',
'SMOOTH', 'SMOOTHI', 'SMOOTH3', 'SMOOTH3I', 'SMOOTH N']
self.cf = ConfigParser()
# config folder doesn't change, so it's put here so it's on one line
self.cf.read(os.path.join(os.path.split(folder)[0], '_config', 'settings.ini'))
self.dist_maxiter = self.cf['tests'].getint('dist_maxiter', fallback=20)
def create_emtpy_matrix(self):
"""
create an NxN matrix filled with np.nan
:return: df, empty matrix
"""
dm = np.empty((len(self.var_lst), len(self.var_lst)))
dm[:] = np.nan
self.dist_matrix = pd.DataFrame(dm)
self.dist_matrix.columns = self.var_lst
self.dist_matrix['name'] = self.var_lst
self.dist_matrix.set_index('name', inplace=True)
def make_loopdoc(self):
"""
:return:
"""
loop_doc = self.doc.copy()
for i, row in loop_doc.iterrows():
row = row.copy()
els = row['elements']
els = [x for x in els if not self.constant(x)]
if 'table_expr' in els:
els = []
loop_doc.at[i, 'elements'] = els
return loop_doc
def loop_tree(self, in_lst):
"""
:param in_lst:
:return:
"""
loop_doc = self.make_loopdoc()
new_level = []
i = 0
for lst in in_lst[i]:
# then we add the elements from the stocks as the first level
for var in lst:
n_lst = loop_doc.loc[loop_doc['Real Name'] == var]['elements'].iloc[0]
r_lst = loop_doc.loc[loop_doc['Real Name'] == var]['init elements'].iloc[0]
f_lst = [x for x in n_lst if x not in r_lst]
new_level.append(f_lst)
in_lst.append(new_level)
while True:
# then we iterate through the lists making a new list for each level of the
# length of the sum of elements of the previous level
i += 1
new_level = []
for lst in in_lst[i]:
if type(lst) == list:
for var in lst:
if var not in self.stock_names:
if not self.constant(var):
n_lst = loop_doc.loc[loop_doc['Real Name'] == var]['elements'].iloc[0]
if n_lst:
new_level.append(n_lst)
else:
new_level.append(np.nan)
else:
new_level.append(np.nan)
else:
new_level.append(np.nan)
else:
# for every loop that is already finished, there needs to be a nan added to keep the length correct
new_level.append(np.nan)
try:
# when all loops have finished, we break the while loop
if np.isnan(new_level).all():
return in_lst, i
except:
pass
# this is just to avoid infinite loops, not sure what the threshold should be 19.06.18/sk
if i == self.dist_maxiter:
return in_lst, i
# each new level is added, the last level with all nan is not added
in_lst.append(new_level)
loop_df = pd.DataFrame(in_lst)
loop_df.to_csv(os.path.join(self.folder_dict[self.test], 'level%s.csv' % i))
def loop_explore(self, in_lst, src_lst, level, max_level):
"""
:param in_lst:
:param src_lst:
:param level:
:param max_level:
:return:
"""
out_lst = []
if level <= max_level:
for j, lst in enumerate(src_lst[level]):
if type(lst) == list:
for var in lst:
t_lst = in_lst[j].copy()
t_lst.append(var)
out_lst.append(t_lst)
else:
t_lst = in_lst[j].copy()
t_lst.append(np.nan)
out_lst.append(t_lst)
level += 1
return self.loop_explore(out_lst, src_lst, level, max_level)
else:
return in_lst
@staticmethod
def make_loopdict(in_lst):
"""
:param in_lst:
:return:
"""
loop_dict = {}
for lst in in_lst:
if lst[0] != lst[-1]:
key = lst[0]
if key in loop_dict:
loop_dict[key].append(lst)
else:
loop_dict[key] = [lst]
return loop_dict
def loop_combine(self, in_lst, loop_lst, loop_dict, iteration=0):
"""
:param in_lst:
:param loop_lst:
:param loop_dict:
:param iteration:
:return:
"""
out_lst = []
t_lst = []
for lst in in_lst:
# first we move the loops that are loops already to the loop list
if lst[0] == lst[-1]:
loop_lst.append(lst)
# then we move the loop elements that are not yet loops to a temporary list
# also we build the dict with the different starting points (stocklike vars)
else:
t_lst.append(lst)
if t_lst:
stock_lst = list(loop_dict.keys())
visited_lst = [stock_lst[0]]
for stock in stock_lst[1:]:
for lst in t_lst:
if lst[-1] not in visited_lst:
# this is to avoid infinite loops where the first loop element can only be completed
# by a loop of two other stocks
if lst.count(lst[-1]) < 2:
for el in loop_dict[lst[-1]]:
b_lst = lst.copy()
b_lst.extend(el[1:])
out_lst.append(b_lst)
visited_lst.append(stock)
iteration += 1
print(iteration)
return self.loop_combine(out_lst, loop_lst, loop_dict, iteration)
else:
return loop_lst
@staticmethod
def clean_looplst(in_lst, stock_lst):
"""
:param in_lst:
:param stock_lst:
:return:
"""
out_lst = []
for lst in in_lst:
# cleaning out the np.nan from the list to arrive at the loop building blocks
lst = [x for x in lst if not
|
pd.isnull(x)
|
pandas.isnull
|
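# Editor's note: added, hedged illustration of the pd.isnull filter used in clean_looplst above;
# the list contents are invented.
import numpy as np
import pandas as pd

lst = ["Stock A", np.nan, "Flow B", None]
cleaned = [x for x in lst if not pd.isnull(x)]   # -> ["Stock A", "Flow B"]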
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-16 16:46
# @Author : erwin
import pandas as pd
from common.util_function import *
data_dict_series = {
'1490707920': 10,
'1490708040': 20,
'1490708200': None,
'1490708100': 20,
}
pds =
|
pd.Series(data_dict_series, name='test')
|
pandas.Series
|
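# Editor's note: added, hedged continuation showing how the missing entry in the Series above
# could be inspected or filled; the common.util_function helpers are not assumed here, so plain
# print is used instead.
import pandas as pd

data_dict_series = {'1490707920': 10, '1490708040': 20, '1490708200': None, '1490708100': 20}
pds = pd.Series(data_dict_series, name='test')
print(pds.isnull())    # the None entry is stored as a missing value (NaN)
print(pds.fillna(0))   # one option: fill the gap with a default before further processing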
#!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@File : Stress_detection_script.py
@Time : 2022/03/17 09:45:59
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import logging
import plotly.express as px
import numpy as np
import pandas as pd
import zipfile
import fnmatch
import flirt.reader.empatica
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime, timedelta
import cvxopt as cv
from neurokit2 import eda_phasic
from matplotlib.font_manager import FontProperties
import matplotlib.dates as mdates
# rootPath = r"./"
# pattern = '*.zip'
rootPath = input("Enter Folder Path : ")
pattern = input("Enter File Name : ")
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(
os.path.join(root, os.path.splitext(filename)[0]))
dir = os.path.splitext(pattern)[0]
# os.listdir(dir)
class process:
def moving_avarage_smoothing(X, k, description_str):
S = np.zeros(X.shape[0])
for t in tqdm(range(X.shape[0]), desc=description_str):
if t < k:
S[t] = np.mean(X[:t+1])
else:
S[t] = np.sum(X[t-k:t])/k
return S
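# Editor's note: added, hedged standalone check of the smoothing rule implemented above -- an
# expanding mean for the first k samples, then a trailing window over the previous k samples;
# the sample data are invented.
import numpy as np
_x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
_k = 2
_s = np.array([np.mean(_x[:t + 1]) if t < _k else np.sum(_x[t - _k:t]) / _k
               for t in range(_x.shape[0])])
# _s -> array([1. , 1.5, 1.5, 2.5, 3.5])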
def deviation_above_mean(unit, mean_unit, std_unit):
'''
Function takes 3 arguments
unit : number of Standard deviations above the mean
mean_unit : mean value of each signal
std_unit : standard deviation of each signal
'''
if unit == 0:
return (mean_unit)
else:
return (mean_unit + (unit*std_unit))
def Starting_timeStamp(column, time_frames, deviation_metric):
'''
Function takes signal, its timestamps and threshold for calculating the starting time when the signal crosses the threshold value
'''
starting_time_index = []
for i in range(len(column)-1): #iterating till the end of the array
if column[i] < deviation_metric and column[i+1] > deviation_metric: # checking if the n+1 element is greater than nth element to conclude if the signal is increasing
starting_time_index.append(time_frames[i]) #appending the timestamp's index to the declared empty array
return starting_time_index
def Ending_timeStamp(column, time_frames, deviation_metric):
'''
Function takes signal, its timestamps and threshold for calculating the ending time when the signal drops back below the threshold value
'''
time_index = []
for i in range(len(column)-1):
if column[i] > deviation_metric and column[i+1] < deviation_metric: # checking if the n+1 element is lesser than nth element to conclude if the signal is decreasing
time_index.append(time_frames[i])
if column[len(column) - 1] > deviation_metric: # checking for hanging ends, where the signal stops abruptly
time_index.insert(
len(time_index), time_frames[len(time_frames) - 1]) # inserting the timestamp's index to the last index of the array
else:
pass
return time_index
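# Editor's note: added, hedged sketch of a vectorised alternative to the crossing detection in
# Starting_timeStamp/Ending_timeStamp above; signal and threshold values are invented.
import numpy as np
_sig = np.array([0.1, 0.4, 0.9, 1.2, 0.8, 0.3])
_thr = 0.5
_above = (_sig > _thr).astype(int)
_rising = np.where(np.diff(_above) == 1)[0]    # last index below _thr before the signal rises
_falling = np.where(np.diff(_above) == -1)[0]  # last index above _thr before the signal falls
# _rising -> array([1]); _falling -> array([4])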
def Extract_HRV_Information():
global hrv_features # declared global so the combined plot function can access it
global hrv_events_df # declared global so the combined plot function can access it
ibi = pd.read_csv(rootPath+'/'+dir+'\IBI.csv')
mean_ibi = ibi[' IBI'].mean()
average_heart_rate = 60/mean_ibi
print('mean ibi is :', mean_ibi)
print('mean heart rate :', average_heart_rate.round())
ibis = flirt.reader.empatica.read_ibi_file_into_df(
rootPath+'/'+dir + '\IBI.csv')
hrv_features = flirt.get_hrv_features(
ibis['ibi'], 128, 1, ["td", "fd"], 0.2)
hrv_features = hrv_features.dropna(how='any', axis=0)
hrv_features.reset_index(inplace=True)
hrv_features['datetime'] = hrv_features['datetime'].dt.tz_convert('US/Eastern')
hrv_features['datetime'] = pd.to_datetime(hrv_features['datetime'])
hrv_features['datetime'] = hrv_features['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
# smoothing the curve
print('\n', '******************** Smoothing The Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hrv_features['hrv_rmssd'], 500, "Processing HRV Data")
hrv_features['MAG_K500'] = MAG_K500
# hrv_features.to_csv("./Metadata/"+ dir+"_HRV.csv")
# hrv_features.to_csv(os.path.join('./Metadata'+dir+'_HRV.csv'))
mean_rmssd = hrv_features['hrv_rmssd'].mean()
std_rmssd = hrv_features['hrv_rmssd'].std()
# getting the starting and ending time of of the signal
starting_timestamp = process.Starting_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
ending_timestamp = process.Ending_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
# in the below if case I am assuming that there were no events that crossed the threshold
if len(starting_timestamp) < 1:
fig, ax1 = plt.subplots(figsize=(30, 10))
ax1.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
# fig.savefig('./Plots/HRV_figure.png')
else:
#check if the starting and ending timestamps pair up; if not, drop the first ending timestamp
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
else:
pass
difference = [] # empty array to see how long the event lasts in seconds
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i) #subtracting ending timestamp - starting timestamp to get difference in seconds
for i in difference:
time_delta_minutes.append(i.total_seconds()/60) # converting the difference in seconds to minutes
time_delta_minutes
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 5.00: #checking if each episode lasts more than 5 minutes
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
hrv_events_df = pd.concat(frames, axis=1)
hrv_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
hrv_events_df['Starting Timestamp'] = hrv_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S") #converting it to Y:M:D H:M:S to ignore nanoseconds in timestamp dataframe
hrv_events_df['Ending Timestamp'] = hrv_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
        hrv_events_df = hrv_events_df.loc[desired_time_index, :] # selecting only the events that cross the duration threshold
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
for d in hrv_events_df.index:
ax.axvspan(hrv_events_df['Starting Timestamp'][d], hrv_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax.relim()
ax.autoscale_view()
# fig.savefig('./Plots/HRV_figure.png')
return hrv_features, hrv_events_df
def Extract_ACC_Infromation():
global acc_df
global acc_events_df
acc_df = pd.read_csv(rootPath+'/'+dir + '/ACC.csv')
acc_df = flirt.reader.empatica.read_acc_file_into_df(
rootPath+'/'+dir + '/ACC.csv')
acc_df['Magnitude'] = np.sqrt(
acc_df['acc_x']**2 + acc_df['acc_y']**2 + acc_df['acc_z']**2)
print("Magnitude Mean : ", acc_df['Magnitude'].mean())
acc_df.reset_index(inplace=True)
acc_df['datetime'] = acc_df['datetime'].dt.tz_convert('US/Eastern')
acc_df['datetime'] = pd.to_datetime(acc_df['datetime'])
acc_df['datetime'] = acc_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
print('\n', '******************** Smoothing The ACC Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
acc_df['Magnitude'], 15000, "Processing ACC Data")
acc_df['MAG_K500'] = MAG_K500
# acc_df.to_csv("./Metadata/"+ dir+"_ACC.csv")
mean_acc_magnitude = acc_df['Magnitude'].mean()
std_acc_magnitude = acc_df['Magnitude'].std()
print("Average Magnitude of the Acc Data : ", mean_acc_magnitude)
starting_timestamp = process.Starting_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
ending_timestamp = process.Ending_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
fig.savefig('./Plots/ACC_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
acc_events_df = pd.concat(frames, axis=1)
acc_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
acc_events_df['Starting Timestamp'] = acc_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
acc_events_df['Ending Timestamp'] = acc_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
acc_events_df = acc_events_df.loc[desired_time_index, :]
# acc_events_df.to_csv(rootPath+"timestamp_" +dir+ "_ACC.csv")
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
for d in acc_events_df.index:
ax2.axvspan(acc_events_df['Starting Timestamp'][d], acc_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax2.relim()
ax2.autoscale_view()
fig.savefig('./Plots/ACC_figure.png')
def Extract_GSR_Phasic_Information():
global eda_df
global eda_phasic_df
global eda_phasic_events_df
eda_df = pd.read_csv(rootPath+'/'+dir+'/EDA.csv')
eda_df = flirt.reader.empatica.read_eda_file_into_df(
rootPath+'/' + dir + '/EDA.csv')
eda_df.reset_index(inplace=True)
eda_df['datetime'] = eda_df['datetime'].dt.tz_convert('US/Eastern')
eda_df['datetime'] = pd.to_datetime(eda_df['datetime'])
eda_df['datetime'] = eda_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
eda = np.array(eda_df['eda'])
Phasic_Tonic_DF = eda_phasic(eda, 4, method='cvxEDA')
eda_df['tonic'] = Phasic_Tonic_DF['EDA_Tonic']
eda_df['phasic'] = Phasic_Tonic_DF['EDA_Phasic']
eda_phasic_df = eda_df.copy()
print('\n', '******************** Smoothing The EDA Phasic Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
eda_phasic_df['phasic'], 2000, "Processing EDA Phasic Data")
eda_phasic_df['MAG_K500'] = MAG_K500
# hrv_features.to_csv('hrv_features.csv')
mean_eda_phasic = eda_phasic_df['phasic'].mean()
std_eda_phasic = eda_phasic_df['phasic'].std()
starting_timestamp = process.Starting_timeStamp(eda_phasic_df['MAG_K500'], eda_phasic_df['datetime'],
process.deviation_above_mean(1, mean_eda_phasic, std_eda_phasic))
ending_timestamp = process.Ending_timeStamp(eda_phasic_df['MAG_K500'], eda_phasic_df['datetime'],
process.deviation_above_mean(1, mean_eda_phasic, std_eda_phasic))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(eda_phasic_df['datetime'],
eda_phasic_df['MAG_K500'], color='red')
fig.savefig('./Plots/EDA_Phasic_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
eda_phasic_events_df = pd.concat(frames, axis=1)
eda_phasic_events_df.columns = [
'Starting Timestamp', 'Ending Timestamp']
eda_phasic_events_df['Starting Timestamp'] = eda_phasic_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_phasic_events_df['Ending Timestamp'] = eda_phasic_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_phasic_events_df = eda_phasic_events_df.loc[desired_time_index, :]
# eda_phasic_events_df.to_csv(rootPath+"timestamp_" + dir + "_EDA.csv")
fig, ax3 = plt.subplots(figsize=(30, 10))
ax3.plot(eda_phasic_df['datetime'],
eda_phasic_df['MAG_K500'], color='red')
for d in eda_phasic_events_df.index:
ax3.axvspan(eda_phasic_events_df['Starting Timestamp'][d],
eda_phasic_events_df['Ending Timestamp'][d], facecolor="g", edgecolor="none", alpha=0.5)
ax3.relim()
ax3.autoscale_view()
fig.savefig('./Plots/EDA_Phasic_figure.png')
return eda_df
def Extract_GSR_Tonic_Information():
global eda_tonic_df
global eda_tonic_events_df
eda_df = pd.read_csv(rootPath+'/'+dir+'/EDA.csv')
eda_df = flirt.reader.empatica.read_eda_file_into_df(
rootPath+'/' + dir + '/EDA.csv')
eda_df.reset_index(inplace=True)
eda_df['datetime'] = eda_df['datetime'].dt.tz_convert('US/Eastern')
eda_df['datetime'] = pd.to_datetime(eda_df['datetime'])
eda_df['datetime'] = eda_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
eda = np.array(eda_df['eda'])
Phasic_Tonic_DF = eda_phasic(eda, 4, method='cvxEDA')
eda_df['tonic'] = Phasic_Tonic_DF['EDA_Tonic']
eda_df['phasic'] = Phasic_Tonic_DF['EDA_Phasic']
eda_tonic_df = eda_df.copy()
print('\n', '******************** Smoothing The EDA Tonic Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
eda_tonic_df['tonic'], 2000, "Processing EDA Tonic Data")
eda_tonic_df['MAG_K500'] = MAG_K500
# hrv_features.to_csv('hrv_features.csv')
mean_eda_tonic = eda_tonic_df['tonic'].mean()
std_eda_tonic = eda_tonic_df['tonic'].std()
starting_timestamp = process.Starting_timeStamp(eda_tonic_df['MAG_K500'], eda_tonic_df['datetime'],
process.deviation_above_mean(1, mean_eda_tonic, std_eda_tonic))
ending_timestamp = process.Ending_timeStamp(eda_tonic_df['MAG_K500'], eda_tonic_df['datetime'],
process.deviation_above_mean(1, mean_eda_tonic, std_eda_tonic))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(eda_tonic_df['datetime'],
eda_tonic_df['MAG_K500'], color='red')
        fig.savefig('./Plots/EDA_tonic_figure.png')
else:
print("entering final else block")
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
eda_tonic_events_df = pd.concat(frames, axis=1)
eda_tonic_events_df.columns = [
'Starting Timestamp', 'Ending Timestamp']
eda_tonic_events_df['Starting Timestamp'] = eda_tonic_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_tonic_events_df['Ending Timestamp'] = eda_tonic_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
eda_tonic_events_df = eda_tonic_events_df.loc[desired_time_index, :]
# eda_tonic_events_df.to_csv(rootPath+"timestamp_" +dir+ "_EDA.csv")
fig, ax4 = plt.subplots(figsize=(30, 10))
ax4.plot(eda_tonic_df['datetime'],
eda_tonic_df['MAG_K500'], color='red')
for d in eda_tonic_events_df.index:
ax4.axvspan(eda_tonic_events_df['Starting Timestamp'][d],
eda_tonic_events_df['Ending Timestamp'][d], facecolor="g", edgecolor="none", alpha=0.5)
ax4.relim()
ax4.autoscale_view()
fig.savefig('./Plots/EDA_tonic_figure.png')
def Extract_Heart_Rate_Features():
global hr_df
global hr_events_df
hr_df = flirt.reader.empatica.read_hr_file_into_df(
rootPath+'/'+dir+'/HR.csv')
hr_df.reset_index(inplace=True)
hr_df['datetime'] = hr_df['datetime'].dt.tz_convert('US/Eastern')
hr_df['datetime'] = pd.to_datetime(hr_df['datetime'])
hr_df['datetime'] = hr_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
print('\n', '******************** Smoothing The Heart Rate Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hr_df['hr'], 500, "Processing Heart Rate Data")
hr_df['MAG_K500'] = MAG_K500
# hrv_features.to_csv('hrv_features.csv')
hr_avg = hr_df['MAG_K500'].mean()
hr_std = hr_df['MAG_K500'].std()
starting_timestamp = process.Starting_timeStamp(
hr_df['MAG_K500'], hr_df['datetime'], process.deviation_above_mean(1, hr_avg, hr_std))
ending_timestamp = process.Ending_timeStamp(
hr_df['MAG_K500'], hr_df['datetime'], process.deviation_above_mean(1, hr_avg, hr_std))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(hr_df['datetime'], hr_df['MAG_K500'], color='red')
        fig.savefig('./Plots/Heart_Rate_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
hr_events_df = pd.concat(frames, axis=1)
hr_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
hr_events_df['Starting Timestamp'] = hr_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
hr_events_df['Ending Timestamp'] = hr_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
hr_events_df = hr_events_df.loc[desired_time_index, :]
# hr_events_df.to_csv(rootPath+"timestamp_" +dir+ "_EDA.csv")
fig, ax4 = plt.subplots(figsize=(30, 10))
ax4.plot(hr_df['datetime'], hr_df['MAG_K500'], color='red')
for d in hr_events_df.index:
ax4.axvspan(hr_events_df['Starting Timestamp'][d], hr_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax4.relim()
ax4.autoscale_view()
fig.savefig('./Plots/Heart_Rate_figure.png')
def handle_overlapping_timestamps():
global concatnated_frame, merged_smaller_events, merged_smaller_events_2
    # making copies of the dataframes to avoid any errors
hrv_events_df_copy = hrv_events_df.copy()
hr_events_df_copy = hr_events_df.copy()
acc_events_df_copy = acc_events_df.copy()
eda_phasic_events_df_copy = eda_phasic_events_df.copy()
eda_tonic_events_df_copy = eda_tonic_events_df.copy()
    # concatenating each individual signal's episodes into a single dataframe
concatnated_frame = pd.concat([hrv_events_df_copy, hr_events_df_copy, acc_events_df_copy, eda_phasic_events_df_copy, eda_tonic_events_df_copy])
concatnated_frame = pd.DataFrame(concatnated_frame)
#converting the timestamp format to unix format
concatnated_frame["Starting Timestamp"] = concatnated_frame["Starting Timestamp"].apply(lambda x: pd.Timestamp(x).timestamp())
concatnated_frame["Ending Timestamp"] = concatnated_frame["Ending Timestamp"].apply(lambda x: pd.Timestamp(x).timestamp())
concatnated_frame = concatnated_frame.sort_values(by=['Starting Timestamp', 'Ending Timestamp']).reset_index(drop = True)
concatnated_frame['Starting Timestamp'] = concatnated_frame["Starting Timestamp"].apply(lambda x: pd.to_datetime(x, unit='s'))
concatnated_frame['Ending Timestamp'] = concatnated_frame['Ending Timestamp'].apply(lambda x: pd.to_datetime(x, unit='s'))
concatnated_frame = concatnated_frame.reset_index(drop = True)
print('###################### Handling Overlapping Events ######################')
    # each pass over the frame merges at most one pair of overlapping events, so the outer loop runs len/2 times to merge every possible overlap
    # (a while loop with a convergence flag would also work, but the flag condition was error-prone; a worked example of the overlap test follows this function)
for i in range(int(len(concatnated_frame)/2)):
for i in tqdm(range(len(concatnated_frame)-1)):
try:
delta_a = concatnated_frame['Ending Timestamp'][i] - concatnated_frame['Starting Timestamp'][i]
delta_b = concatnated_frame['Ending Timestamp'][i+1] - concatnated_frame['Starting Timestamp'][i+1]
c1 = min(concatnated_frame['Starting Timestamp'][i], concatnated_frame['Starting Timestamp'][i+1])
c2 = max(concatnated_frame['Ending Timestamp'][i], concatnated_frame['Ending Timestamp'][i+1])
Dc = c2-c1
if ((delta_a+delta_b)>Dc):
concatnated_frame['Starting Timestamp'][i] = c1
concatnated_frame['Ending Timestamp'][i] = c2
concatnated_frame = concatnated_frame.drop(concatnated_frame.index[i+1]).reset_index(drop=True)
except KeyError as error:
logging.info("index overflow handling exception")
print('###################### Handling smaller Events ######################')
concatnated_frame['Starting Timestamp'] = concatnated_frame["Starting Timestamp"].apply(lambda x: pd.to_datetime(x, unit='s'))
concatnated_frame['Ending Timestamp'] = concatnated_frame['Ending Timestamp'].apply(lambda x: pd.to_datetime(x, unit='s'))
    # as above, each pass merges at most one pair of events, so the outer loop runs len/2 times to join every qualifying pair
    # (a while loop with a convergence flag was avoided for the same reason)
for i in tqdm(range(int(len(concatnated_frame)/2))):
for i in range(len(concatnated_frame)-1):
try:
                # if the next episode starts within less than 10 minutes of the current one ending, merge the two episodes
if (concatnated_frame['Starting Timestamp'][i+1] - concatnated_frame['Ending Timestamp'][i] < timedelta(minutes = 10)):
concatnated_frame['Ending Timestamp'][i] = concatnated_frame['Ending Timestamp'][i+1]
concatnated_frame = concatnated_frame.drop(concatnated_frame.index[i+1]).reset_index(drop=True)
merged_smaller_events = concatnated_frame.copy()
except KeyError as error:
logging.info('ignore index overflow error')
merged_smaller_events_2 = merged_smaller_events.copy()
merged_smaller_events['Starting Timestamp'] = merged_smaller_events['Starting Timestamp'].dt.strftime("%m/%d/%Y, %I:%M:%S %p")
merged_smaller_events['Ending Timestamp'] = merged_smaller_events['Ending Timestamp'].dt.strftime("%m/%d/%Y, %I:%M:%S %p")
merged_smaller_events.index = merged_smaller_events.index + 1
return merged_smaller_events, merged_smaller_events_2
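# Worked sketch of the overlap test used in handle_overlapping_timestamps (illustration
# only, with made-up intervals): for two events A = [10:00, 10:08] and B = [10:05, 10:12],
#   delta_a = 8 min, delta_b = 7 min, c1 = 10:00, c2 = 10:12, Dc = c2 - c1 = 12 min.
# Since delta_a + delta_b = 15 min > Dc = 12 min, the events overlap and are merged into
# [c1, c2] = [10:00, 10:12]; for disjoint events delta_a + delta_b <= Dc, so they are kept as-is.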
def stack_plot_results():
'''
    For the EMA data, the rows are indexed manually and each column is checked against its condition.
'''
ema_df = pd.read_csv('./EMA_Survey/ema.csv')
ema_df = ema_df.iloc[2: , :]
ema_df.reset_index(inplace=True)
forenoon_ema_df = ema_df.iloc[[0], :]
afternoon_ema_df = ema_df.iloc[[1], :]
forenoon_data = []
forenoon_data.append('Start Time = ' + str((pd.to_datetime(forenoon_ema_df['StartDate']).dt.strftime("%m/%d/%Y, %I:%M:%S %p").values)))
    forenoon_data.append('End Time = ' + str((pd.to_datetime(forenoon_ema_df['EndDate']).dt.strftime("%m/%d/%Y, %I:%M:%S %p").values)))
# Required imports
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
import scipy
import random
import datetime
import re
import time
from math import sqrt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import preprocessing
pd.set_option('display.max_columns', None) # to view all columns
from scipy.optimize import curve_fit
from supersmoother import SuperSmoother
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge, Lasso, RidgeCV, LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
from pyproj import Proj, Transformer
from ipyleaflet import (Map, basemaps, WidgetControl, GeoJSON,
LayersControl, Icon, Marker,FullScreenControl,
CircleMarker, Popup, AwesomeIcon)
from ipywidgets import HTML
plt.rcParams["font.family"] = "Times New Roman"
class functions:
def __init__(self, data):
self.setData(data)
self.__jointData = [None, 0]
# DATA VALIDATION
def __isValid_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
def __isValid_Construction_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Construction_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
# COLUMN VALIDATION
def __hasColumns_Data(self, data):
find = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
def __hasColumns_Construction_Data(self, data):
find = ['STATION_ID', 'AQUIFER', 'WELL_USE', 'LATITUDE', 'LONGITUDE', 'GROUND_ELEVATION', 'TOTAL_DEPTH']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
# SETTING DATA
def setData(self, data, verbose=True):
validation = self.__isValid_Data(data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(data.columns)]
data.columns = cols_upper
self.data = data
if(verbose):
print('Successfully imported the data!\n')
self.__set_units()
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_DATA()
def setConstructionData(self, construction_data, verbose=True):
validation = self.__isValid_Construction_Data(construction_data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(construction_data.columns)]
construction_data.columns = cols_upper
self.construction_data = construction_data.set_index(['STATION_ID'])
if(verbose):
print('Successfully imported the construction data!\n')
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_CONSTRUCTION_DATA()
def jointData_is_set(self, lag):
if(str(type(self.__jointData[0])).lower().find('dataframe') == -1):
return False
else:
if(self.__jointData[1]==lag):
return True
else:
return False
def set_jointData(self, data, lag):
self.__jointData[0] = data
self.__jointData[1] = lag
# GETTING DATA
def getData(self):
return self.data
def get_Construction_Data(self):
return self.construction_data
# MESSAGES FOR INVALID DATA
def REQUIREMENTS_DATA(self):
print('PYLENM DATA REQUIREMENTS:\nThe imported data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']")
def REQUIREMENTS_CONSTRUCTION_DATA(self):
print('PYLENM CONSTRUCTION REQUIREMENTS:\nThe imported construction data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['station_id', 'aquifer', 'well_use', 'latitude', 'longitude', 'ground_elevation', 'total_depth']")
# Helper function for plot_correlation
# Sorts analytes in a specific order: 'TRITIUM', 'URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER'
def __custom_analyte_sort(self, analytes):
my_order = 'TURISPDABCEFGHJKLMNOQVWXYZ-_abcdefghijklmnopqrstuvwxyz135790 2468'
return sorted(analytes, key=lambda word: [my_order.index(c) for c in word])
def __plotUpperHalf(self, *args, **kwargs):
corr_r = args[0].corr(args[1], 'pearson')
corr_text = f"{corr_r:2.2f}"
ax = plt.gca()
ax.set_axis_off()
marker_size = abs(corr_r) * 10000
ax.scatter([.5], [.5], marker_size, [corr_r], alpha=0.6, cmap="coolwarm",
vmin=-1, vmax=1, transform=ax.transAxes)
font_size = abs(corr_r) * 40 + 5
ax.annotate(corr_text, [.5, .48,], xycoords="axes fraction", # [.5, .48,]
ha='center', va='center', fontsize=font_size, fontweight='bold')
# Description:
# Removes all columns except 'COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'.
# If the user specifies additional columns in addition to the ones listed above, those columns will be kept.
# The function returns a dataframe and has an optional parameter to be able to save the dataframe to a csv file.
# Parameters:
# data (dataframe): data to simplify
# inplace (bool): save data to current working dataset
# columns (list of strings): list of any additional columns on top of ['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'] to be kept in the dataframe.
# save_csv (bool): flag to determine whether or not to save the dataframe to a csv file.
# file_name (string): name of the csv file you want to save
# save_dir (string): name of the directory you want to save the csv file to
def simplify_data(self, data=None, inplace=False, columns=None, save_csv=False, file_name= 'data_simplified', save_dir='data/'):
if(str(type(data)).lower().find('dataframe') == -1):
data = self.data
else:
data = data
if(columns==None):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
else:
hasColumns = all(item in list(data.columns) for item in columns)
if(hasColumns):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS'] + columns
else:
print('ERROR: specified column(s) do not exist in the data')
return None
data = data[sel_cols]
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data.sort_values(by="COLLECTION_DATE")
dup = data[data.duplicated(['COLLECTION_DATE', 'STATION_ID','ANALYTE_NAME', 'RESULT'])]
data = data.drop(dup.index)
data = data.reset_index().drop('index', axis=1)
if(save_csv):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
data.to_csv(save_dir + file_name + '.csv')
print('Successfully saved "' + file_name +'.csv" in ' + save_dir)
if(inplace):
self.setData(data, verbose=False)
return data
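    # Hedged usage sketch: keeping one extra column while simplifying and saving the result;
    # 'CURATION_PERFORMED_BY' is a made-up column name used only to illustrate the `columns` parameter.
    #
    #   simplified = pylenm_obj.simplify_data(columns=['CURATION_PERFORMED_BY'],
    #                                         save_csv=True, file_name='data_simplified',
    #                                         save_dir='data/')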
# Description:
# Returns the Maximum Concentration Limit value for the specified analyte.
# Example: 'TRITIUM' returns 1.3
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_MCL(self, analyte_name):
mcl_dictionary = {'TRITIUM': 1.3, 'URANIUM-238': 1.31, 'NITRATE-NITRITE AS NITROGEN': 1,
'TECHNETIUM-99': 2.95, 'IODINE-129': 0, 'STRONTIUM-90': 0.9
}
return mcl_dictionary[analyte_name]
def __set_units(self):
analytes = list(np.unique(self.data[['ANALYTE_NAME']]))
mask1 = ~self.data[['ANALYTE_NAME','RESULT_UNITS']].duplicated()
res = self.data[['ANALYTE_NAME','RESULT_UNITS']][mask1]
mask2 = ~self.data[['ANALYTE_NAME']].duplicated()
res = res[mask2]
unit_dictionary = pd.Series(res.RESULT_UNITS.values,index=res.ANALYTE_NAME).to_dict()
self.unit_dictionary = unit_dictionary
# Description:
# Returns the unit of the analyte you specify.
# Example: 'DEPTH_TO_WATER' returns 'ft'
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_unit(self, analyte_name):
return self.unit_dictionary[analyte_name]
# Description:
    # Filters construction data based on one column. You only specify ONE column to filter by, but can select MANY values for the entry.
# Parameters:
# data (dataframe): dataframe to filter
# col (string): column to filter. Example: col='STATION_ID'
# equals (list of strings): values to filter col by. Examples: equals=['FAI001A', 'FAI001B']
def filter_by_column(self, data=None, col=None, equals=[]):
if(data is None):
return 'ERROR: DataFrame was not provided to this function.'
else:
if(str(type(data)).lower().find('dataframe') == -1):
return 'ERROR: Data provided is not a pandas DataFrame.'
else:
data = data
# DATA VALIDATION
if(col==None):
return 'ERROR: Specify a column name to filter by.'
data_cols = list(data.columns)
if((col in data_cols)==False): # Make sure column name exists
return 'Error: Column name "{}" does not exist'.format(col)
if(equals==[]):
return 'ERROR: Specify a value that "{}" should equal to'.format(col)
data_val = list(data[col])
for value in equals:
if((value in data_val)==False):
return 'ERROR: No value equal to "{}" in "{}".'.format(value, col)
# QUERY
final_data = pd.DataFrame()
for value in equals:
current_data = data[data[col]==value]
final_data = pd.concat([final_data, current_data])
return final_data
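    # Hedged usage sketch: filtering the construction data by one column; 'WELL_USE' is a
    # required construction column, but the value 'MONITORING' is a made-up example entry.
    #
    #   subset = pylenm_obj.filter_by_column(data=pylenm_obj.get_Construction_Data(),
    #                                        col='WELL_USE', equals=['MONITORING'])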
# Description:
# Returns a list of the well names filtered by the unit(s) specified.
# Parameters:
# units (list of strings): Letter of the well to be filtered (e.g. [‘A’] or [‘A’, ‘D’])
def filter_wells(self, units):
data = self.data
if(units==None):
units= ['A', 'B', 'C', 'D']
def getUnits():
wells = list(np.unique(data.STATION_ID))
wells = pd.DataFrame(wells, columns=['STATION_ID'])
for index, row in wells.iterrows():
mo = re.match('.+([0-9])[^0-9]*$', row.STATION_ID)
last_index = mo.start(1)
wells.at[index, 'unit'] = row.STATION_ID[last_index+1:]
u = wells.unit.iloc[index]
if(len(u)==0): # if has no letter, use D
wells.at[index, 'unit'] = 'D'
if(len(u)>1): # if has more than 1 letter, remove the extra letter
if(u.find('R')>0):
wells.at[index, 'unit'] = u[:-1]
else:
wells.at[index, 'unit'] = u[1:]
u = wells.unit.iloc[index]
if(u=='A' or u=='B' or u=='C' or u=='D'):
pass
else:
wells.at[index, 'unit'] = 'D'
return wells
df = getUnits()
res = df.loc[df.unit.isin(units)]
return list(res.STATION_ID)
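    # Hedged sketch of the naming convention assumed above (station ids are made up): the
    # letter that follows the last digit of STATION_ID is treated as the unit, defaulting to 'D'.
    #
    #   'FSB130D' -> unit 'D'
    #   'FSB95C'  -> unit 'C'
    #   'FSB 97'  -> no trailing letter, treated as unit 'D'
    #   pylenm_obj.filter_wells(['A', 'D'])   # station ids whose parsed unit is 'A' or 'D'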
# Description:
# Removes outliers from a dataframe based on the z_scores and returns the new dataframe.
# Parameters:
# data (dataframe): data for the outliers to removed from
# z_threshold (float): z_score threshold to eliminate.
def remove_outliers(self, data, z_threshold=4):
z = np.abs(stats.zscore(data))
row_loc = np.unique(np.where(z > z_threshold)[0])
data = data.drop(data.index[row_loc])
return data
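    # Hedged usage sketch: rows whose |z-score| exceeds the threshold in any column are dropped;
    # the toy frame and the 1.5 threshold are made up for illustration (the default threshold is 4).
    #
    #   import pandas as pd
    #   toy = pd.DataFrame({'x': [1.0, 1.1, 0.9, 1.2, 100.0]})
    #   cleaned = pylenm_obj.remove_outliers(toy, z_threshold=1.5)   # the 100.0 row (z ~ 2.0) is dropped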
# Description:
# Returns a csv file saved to save_dir with details pertaining to the specified analyte.
# Details include the well names, the date ranges and the number of unique samples.
# Parameters:
# analyte_name (string): name of the analyte to be processed
# save_dir (string): name of the directory you want to save the csv file to
def get_analyte_details(self, analyte_name, filter=False, col=None, equals=[], save_to_file = False, save_dir='analyte_details'):
data = self.data
data = data[data.ANALYTE_NAME == analyte_name].reset_index().drop('index', axis=1)
data = data[~data.RESULT.isna()]
data = data.drop(['ANALYTE_NAME', 'RESULT', 'RESULT_UNITS'], axis=1)
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
wells = np.unique(data.STATION_ID.values)
for well in wells:
current = data[data.STATION_ID == well]
startDate = current.COLLECTION_DATE.min().date()
endDate = current.COLLECTION_DATE.max().date()
numSamples = current.duplicated().value_counts()[0]
info.append({'Well Name': well, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)': endDate-startDate ,
'Unique samples': numSamples})
details = pd.DataFrame(info)
details.index = details['Well Name']
details = details.drop('Well Name', axis=1)
details = details.sort_values(by=['Start Date', 'End Date'])
details['Date Range (days)'] = (details['Date Range (days)']/ np.timedelta64(1, 'D')).astype(int)
if(save_to_file):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
details.to_csv(save_dir + '/' + analyte_name + '_details.csv')
return details
# Description:
# Returns a dataframe with a summary of the data for certain analytes.
# Summary includes the date ranges and the number of unique samples and other statistics for the analyte results.
# Parameters:
# analytes (list of strings): list of analyte names to be processed. If left empty, a list of all the analytes in the data will be used.
# sort_by (string): {‘date’, ‘samples’, ‘wells’} sorts the data by either the dates by entering: ‘date’, the samples by entering: ‘samples’, or by unique well locations by entering ‘wells’.
# ascending (bool): flag to sort in ascending order.
def get_data_summary(self, analytes=None, sort_by='date', ascending=False, filter=False, col=None, equals=[]):
data = self.data
if(analytes == None):
analytes = data.ANALYTE_NAME.unique()
data = data.loc[data.ANALYTE_NAME.isin(analytes)].drop(['RESULT_UNITS'], axis=1)
data = data[~data.duplicated()] # remove duplicates
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data[~data.RESULT.isna()]
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
for analyte_name in analytes:
query = data[data.ANALYTE_NAME == analyte_name]
startDate = min(query.COLLECTION_DATE)
endDate = max(query.COLLECTION_DATE)
numSamples = query.shape[0]
wellCount = len(query.STATION_ID.unique())
stats = query.RESULT.describe().drop('count', axis=0)
stats = pd.DataFrame(stats).T
stats_col = [x for x in stats.columns]
result = {'Analyte Name': analyte_name, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)':endDate-startDate, '# unique wells': wellCount,'# samples': numSamples,
'Unit': self.get_unit(analyte_name) }
for num in range(len(stats_col)):
result[stats_col[num]] = stats.iloc[0][num]
info.append(result)
details = pd.DataFrame(info)
details.index = details['Analyte Name']
details = details.drop('Analyte Name', axis=1)
if(sort_by.lower() == 'date'):
details = details.sort_values(by=['Start Date', 'End Date', 'Date Range (days)'], ascending=ascending)
elif(sort_by.lower() == 'samples'):
details = details.sort_values(by=['# samples'], ascending=ascending)
elif(sort_by.lower() == 'wells'):
details = details.sort_values(by=['# unique wells'], ascending=ascending)
return details
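    # Hedged usage sketch: both summaries return DataFrames describing data coverage;
    # the analyte names are placeholders and need to exist in the loaded dataset.
    #
    #   details = pylenm_obj.get_analyte_details('TRITIUM')   # per-well date ranges and sample counts
    #   summary = pylenm_obj.get_data_summary(analytes=['TRITIUM', 'PH'],
    #                                         sort_by='samples', ascending=True)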
# Description:
# Displays the analyte names available at given well locations.
# Parameters:
# well_name (string): name of the well. If left empty, all wells are returned.
# filter (bool): flag to indicate filtering
# col (string): column to filter results
# equals (list of strings): value to match column name. Multiple values are accepted.
def get_well_analytes(self, well_name=None, filter=False, col=None, equals=[]):
data = self.data
bb = "\033[1m"
be = "\033[0m"
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
if(well_name==None):
wells = list(data.STATION_ID.unique())
else:
wells = [well_name]
for well in wells:
print("{}{}{}".format(bb,str(well), be))
analytes = sorted(list(data[data.STATION_ID==well].ANALYTE_NAME.unique()))
print(str(analytes) +'\n')
# Description:
# Filters data by passing the data and specifying the well_name and analyte_name
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
def query_data(self, well_name, analyte_name):
data = self.data
query = data[data.STATION_ID == well_name]
query = query[query.ANALYTE_NAME == analyte_name]
if(query.shape[0] == 0):
return 0
else:
return query
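    # Hedged usage sketch: query_data returns 0 when nothing matches, otherwise the matching rows;
    # the well and analyte names below are placeholders.
    #
    #   q = pylenm_obj.query_data('FAI001A', 'TRITIUM')
    #   if isinstance(q, int):   # 0 means no results for that well/analyte pair
    #       print('no data for this pair')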
# Description:
# Plot concentrations over time of a specified well and analyte with a smoothed curve on interpolated data points.
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
# log_transform (bool): choose whether or not the data should be transformed to log base 10 values
# alpha (int): value between 0 and 10 for line smoothing
# year_interval (int): plot by how many years to appear in the axis e.g.(1 = every year, 5 = every 5 years, ...)
# plot_inline (bool): choose whether or not to show plot inline
# save_dir (string): name of the directory you want to save the plot to
def plot_data(self, well_name, analyte_name, log_transform=True, alpha=0,
plot_inline=True, year_interval=2, x_label='Years', y_label='', save_dir='plot_data', filter=False, col=None, equals=[]):
# Gets appropriate data (well_name and analyte_name)
query = self.query_data(well_name, analyte_name)
query = self.simplify_data(data=query)
if(type(query)==int and query == 0):
return 'No results found for {} and {}'.format(well_name, analyte_name)
else:
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(query.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                    return 'ERROR: No results for this query with the specified filter parameters.'
query = query[query['STATION_ID'].isin(intersect_wells)]
x_data = query.COLLECTION_DATE
x_data = pd.to_datetime(x_data)
y_data = query.RESULT
if(log_transform):
y_data = np.log10(y_data)
# Remove any NaN as a result of the log transformation
nans = ~np.isnan(y_data)
x_data = x_data[nans]
y_data = y_data[nans]
x_RR = x_data.astype(int).to_numpy()
# Remove any duplicate dates
unique = ~pd.Series(x_data).duplicated()
x_data = x_data[unique]
y_data = y_data[unique]
unique = ~pd.Series(y_data).duplicated()
x_data = x_data[unique]
y_data = y_data[unique]
x_RR = x_data.astype(int).to_numpy()
nu = x_data.shape[0]
result = None
while result is None:
if(nu < 5):
return 'ERROR: Could not plot {}, {}'.format(well_name, analyte_name)
break
nu = nu - 1
x_data = x_data[:nu]
x_RR = x_RR[:nu]
y_data = y_data[:nu]
try:
# fit the supersmoother model
model = SuperSmoother(alpha=alpha)
model.fit(x_RR, y_data)
y_pred = model.predict(x_RR)
r = model.cv_residuals()
out = abs(r) > 2.2*np.std(r)
out_x = x_data[out]
out_y = y_data[out]
plt.figure(figsize=(8,8))
ax = plt.axes()
years = mdates.YearLocator(year_interval) # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_horizontalalignment('center')
ax = plt.gca()
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.autoscale_view()
unit = query.RESULT_UNITS.values[0]
ax.set_title(well_name + ' - ' + analyte_name, fontweight='bold')
ttl = ax.title
ttl.set_position([.5, 1.05])
if(y_label==''):
if(log_transform):
ax.set_ylabel('log-Concentration (' + unit + ')')
else:
ax.set_ylabel('Concentration (' + unit + ')')
else:
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
ax.plot(x_data, y_data, ls='', marker='o', ms=5, color='black', alpha=1)
ax.plot(x_data, y_pred, ls='-', marker='', ms=5, color='black', alpha=0.5, label="Super Smoother")
ax.plot(out_x , out_y, ls='', marker='o', ms=5, color='red', alpha=1, label="Outliers")
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left', borderaxespad=0.)
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.05, 0.85, 'Samples: {}'.format(nu), transform=ax.transAxes,
fontsize=small_fontSize,
fontweight='bold',
verticalalignment='top',
bbox=props)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(save_dir + '/' + well_name + '-' + analyte_name +'.png', bbox_inches="tight")
if(plot_inline):
plt.show()
plt.clf()
plt.cla()
plt.close()
result = 1
except:
pass
# Description:
# Plot concentrations over time for every well and analyte with a smoothed curve on interpolated data points.
# Parameters:
# log_transform (bool): choose whether or not the data should be transformed to log base 10 values
# alpha (int): value between 0 and 10 for line smoothing
# year_interval (int): plot by how many years to appear in the axis e.g.(1 = every year, 5 = every 5 years, ...)
# plot_inline (bool): choose whether or not to show plot inline
# save_dir (string): name of the directory you want to save the plot to
def plot_all_data(self, log_transform=True, alpha=0, year_interval=2, plot_inline=True, save_dir='plot_data'):
analytes = ['TRITIUM','URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER']
        wells = np.array(self.data.STATION_ID.values)
wells = np.unique(wells)
success = 0
errors = 0
for well in wells:
for analyte in analytes:
plot = self.plot_data(well, analyte,
log_transform=log_transform,
alpha=alpha,
year_interval=year_interval,
plot_inline=plot_inline,
save_dir=save_dir)
if 'ERROR:' in str(plot):
errors = errors + 1
else:
success = success + 1
print("Success: ", success)
print("Errors: ", errors)
# Description:
# Plots a heatmap of the correlations of the important analytes over time for a specified well.
# Parameters:
# well_name (string): name of the well to be processed
# show_symmetry (bool): choose whether or not the heatmap should show the same information twice over the diagonal
# color (bool): choose whether or not the plot should be in color or in greyscale
# save_dir (string): name of the directory you want to save the plot to
def plot_correlation_heatmap(self, well_name, show_symmetry=True, color=True, save_dir='plot_correlation_heatmap'):
data = self.data
query = data[data.STATION_ID == well_name]
a = list(np.unique(query.ANALYTE_NAME.values))
b = ['TRITIUM','IODINE-129','SPECIFIC CONDUCTANCE', 'PH','URANIUM-238', 'DEPTH_TO_WATER']
        analytes = self.__custom_analyte_sort(list(set(a) & set(b)))
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
analytes = self.__custom_analyte_sort(np.unique(query.ANALYTE_NAME.values))
x = query[['COLLECTION_DATE', 'ANALYTE_NAME']]
unique = ~x.duplicated()
query = query[unique]
piv = query.reset_index().pivot(index='COLLECTION_DATE',columns='ANALYTE_NAME', values='RESULT')
piv = piv[analytes]
totalSamples = piv.shape[0]
piv = piv.dropna()
samples = piv.shape[0]
if(samples < 5):
return 'ERROR: {} does not have enough samples to plot.'.format(well_name)
else:
scaler = StandardScaler()
pivScaled = scaler.fit_transform(piv)
pivScaled = pd.DataFrame(pivScaled, columns=piv.columns)
pivScaled.index = piv.index
corr_matrix = pivScaled.corr()
if(show_symmetry):
mask = None
else:
mask = np.triu(corr_matrix)
if(color):
cmap = 'RdBu'
else:
cmap = 'binary'
fig, ax = plt.subplots(figsize=(8,6))
ax.set_title(well_name + '_correlation', fontweight='bold')
ttl = ax.title
ttl.set_position([.5, 1.05])
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.3, 1.05, 'Start date: {}\nEnd date: {}\n\nSamples: {} of {}'.format(piv.index[0], piv.index[-1], samples, totalSamples), transform=ax.transAxes, fontsize=15, fontweight='bold', verticalalignment='bottom', bbox=props)
ax = sns.heatmap(corr_matrix,
ax=ax,
mask=mask,
vmin=-1, vmax=1,
xticklabels=corr_matrix.columns,
yticklabels=corr_matrix.columns,
cmap=cmap,
annot=True,
linewidths=1,
cbar_kws={'orientation': 'vertical'})
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_dir + '/' + well_name + '_correlation.png', bbox_inches="tight")
# Description:
# Plots a heatmap of the correlations of the important analytes over time for each well in the dataset.
# Parameters:
# show_symmetry (bool): choose whether or not the heatmap should show the same information twice over the diagonal
# color (bool): choose whether or not the plot should be in color or in greyscale
# save_dir (string): name of the directory you want to save the plot to
def plot_all_correlation_heatmap(self, show_symmetry=True, color=True, save_dir='plot_correlation_heatmap'):
data = self.data
wells = np.array(data.STATION_ID.values)
wells = np.unique(wells)
for well in wells:
self.plot_correlation_heatmap(well_name=well,
show_symmetry=show_symmetry,
color=color,
save_dir=save_dir)
# Description:
# Resamples the data based on the frequency specified and interpolates the values of the analytes.
# Parameters:
# well_name (string): name of the well to be processed
# analytes (list of strings): list of analyte names to use
# frequency (string): {‘D’, ‘W’, ‘M’, ‘Y’} frequency to interpolate.
# See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html for valid frequency inputs. (e.g. ‘W’ = every week, ‘D ’= every day, ‘2W’ = every 2 weeks)
def interpolate_well_data(self, well_name, analytes, frequency='2W'):
data = self.data
inter_series = {}
query = data[data.STATION_ID == well_name]
for analyte in analytes:
series = query[query.ANALYTE_NAME == analyte]
series = (series[['COLLECTION_DATE', 'RESULT']])
series.COLLECTION_DATE = pd.to_datetime(series.COLLECTION_DATE)
series.index = series.COLLECTION_DATE
original_dates = series.index
series = series.drop('COLLECTION_DATE', axis=1)
series = series.rename({'RESULT': analyte}, axis=1)
upsampled = series.resample(frequency).mean()
interpolated = upsampled.interpolate(method='linear', order=2)
inter_series[analyte] = interpolated
join = inter_series[analytes[0]]
join = join.drop(analytes[0], axis=1)
for analyte in analytes:
join = join.join(inter_series[analyte])
join = join.dropna()
return join
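    # Hedged usage sketch: resampling two analytes for one (placeholder) well onto a common
    # 2-week grid; each series is mean-resampled, linearly interpolated, joined, and rows with
    # any remaining NaN are dropped.
    #
    #   joined = pylenm_obj.interpolate_well_data('FAI001A', ['TRITIUM', 'PH'], frequency='2W')
    #   joined.head()   # index: 2-week COLLECTION_DATE grid, columns: TRITIUM, PH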
# Description:
# Plots the correlations with the physical plots as well as the correlations of the important analytes over time for a specified well.
# Parameters:
# well_name (string): name of the well to be processed
# analytes (list of strings): list of analyte names to use
    # remove_outliers (bool): choose whether or not to remove the outliers.
# z_threshold (float): z_score threshold to eliminate outliers
    # interpolate (bool): choose whether or not to interpolate the data
# frequency (string): {‘D’, ‘W’, ‘M’, ‘Y’} frequency to interpolate. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html for valid frequency inputs. (e.g. ‘W’ = every week, ‘D ’= every day, ‘2W’ = every 2 weeks)
# save_dir (string): name of the directory you want to save the plot to
def plot_corr_by_well(self, well_name, analytes, remove_outliers=True, z_threshold=4, interpolate=False, frequency='2W', save_dir='plot_correlation', log_transform=False, fontsize=20, returnData=False, remove=[]):
data = self.data
query = data[data.STATION_ID == well_name]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
x = query[['COLLECTION_DATE', 'ANALYTE_NAME']]
unique = ~x.duplicated()
query = query[unique]
piv = query.reset_index().pivot(index='COLLECTION_DATE',columns='ANALYTE_NAME', values='RESULT')
piv = piv[analytes]
piv.index = pd.to_datetime(piv.index)
totalSamples = piv.shape[0]
piv = piv.dropna()
if(interpolate):
piv = self.interpolate_well_data(well_name, analytes, frequency=frequency)
file_extension = '_interpolated_' + frequency
title = well_name + '_correlation - interpolated every ' + frequency
else:
file_extension = '_correlation'
title = well_name + '_correlation'
samples = piv.shape[0]
if(samples < 5):
if(interpolate):
return 'ERROR: {} does not have enough samples to plot.\n Try a different interpolation frequency'.format(well_name)
return 'ERROR: {} does not have enough samples to plot.'.format(well_name)
else:
# scaler = StandardScaler()
# pivScaled = scaler.fit_transform(piv)
# pivScaled = pd.DataFrame(pivScaled, columns=piv.columns)
# pivScaled.index = piv.index
# piv = pivScaled
if(log_transform):
piv[piv <= 0] = 0.00000001
piv = np.log10(piv)
# Remove outliers
if(remove_outliers):
piv = self.remove_outliers(piv, z_threshold=z_threshold)
samples = piv.shape[0]
idx = piv.index.date
dates = [dates.strftime('%Y-%m-%d') for dates in idx]
remaining = [i for i in dates if i not in remove]
piv = piv.loc[remaining]
sns.set_style("white", {"axes.facecolor": "0.95"})
g = sns.PairGrid(piv, aspect=1.2, diag_sharey=False, despine=False)
g.fig.suptitle(title, fontweight='bold', y=1.08, fontsize=25)
g.map_lower(sns.regplot, lowess=True, ci=False, line_kws={'color': 'red', 'lw': 3},
scatter_kws={'color': 'black', 's': 20})
g.map_diag(sns.distplot, kde_kws={'color': 'black', 'lw': 3}, hist_kws={'histtype': 'bar', 'lw': 2, 'edgecolor': 'k', 'facecolor':'grey'})
g.map_upper(self.__plotUpperHalf)
for ax in g.axes.flat:
ax.tick_params("y", labelrotation=0, labelsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize, fontweight='bold') #HERE
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize,fontweight='bold')
g.fig.subplots_adjust(wspace=0.3, hspace=0.3)
ax = plt.gca()
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.3, 6.2, 'Start date: {}\nEnd date: {}\n\nOriginal samples: {}\nSamples used: {}'.format(piv.index[0].date(), piv.index[-1].date(), totalSamples, samples), transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
# Add titles to the diagonal axes/subplots
for ax, col in zip(np.diag(g.axes), piv.columns):
ax.set_title(col, y=0.82, fontsize=15)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
g.fig.savefig(save_dir + '/' + well_name + file_extension + '.png', bbox_inches="tight")
if(returnData):
return piv
# Description:
# Plots the correlations with the physical plots as well as the important analytes over time for each well in the dataset.
# Parameters:
# analytes (list of strings): list of analyte names to use
    # remove_outliers (bool): choose whether or not to remove the outliers.
# z_threshold (float): z_score threshold to eliminate outliers
    # interpolate (bool): choose whether or not to interpolate the data
# frequency (string): {‘D’, ‘W’, ‘M’, ‘Y’} frequency to interpolate. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html for valid frequency inputs. (e.g. ‘W’ = every week, ‘D ’= every day, ‘2W’ = every 2 weeks)
# save_dir (string): name of the directory you want to save the plot to
def plot_all_corr_by_well(self, analytes, remove_outliers=True, z_threshold=4, interpolate=False, frequency='2W', save_dir='plot_correlation', log_transform=False, fontsize=20):
data = self.data
wells = np.array(data.STATION_ID.values)
wells = np.unique(wells)
for well in wells:
self.plot_corr_by_well(well_name=well, analytes=analytes,remove_outliers=remove_outliers, z_threshold=z_threshold, interpolate=interpolate, frequency=frequency, save_dir=save_dir, log_transform=log_transform, fontsize=fontsize)
# Description:
# Plots the correlations with the physical plots as well as the correlations of the important analytes for ALL the wells on a specified date or range of dates if a lag greater than 0 is specifed.
# Parameters:
# date (string): date to be analyzed
# analytes (list of strings): list of analyte names to use
# lag (int): number of days to look ahead and behind the specified date (+/-)
# min_samples (int): minimum number of samples the result should contain in order to execute.
# save_dir (string): name of the directory you want to save the plot to
def plot_corr_by_date_range(self, date, analytes, lag=0, min_samples=10, save_dir='plot_corr_by_date', log_transform=False, fontsize=20, returnData=False):
if(lag==0):
data = self.data
data = self.simplify_data(data=data)
query = data[data.COLLECTION_DATE == date]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
if(query.shape[0] == 0):
                return 'ERROR: {} has no data for any of the specified analytes.'.format(date)
samples = query[['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME']].duplicated().value_counts()[0]
if(samples < min_samples):
return 'ERROR: {} does not have at least {} samples.'.format(date, min_samples)
else:
piv = query.reset_index().pivot_table(index = 'STATION_ID', columns='ANALYTE_NAME', values='RESULT',aggfunc=np.mean)
# return piv
else:
# If the data has already been calculated with the lag specified, retrieve it
if(self.jointData_is_set(lag=lag)==True):
data = self.__jointData[0]
# Otherwise, calculate it
else:
data = self.getJointData(analytes, lag=lag)
self.set_jointData(data=data, lag=lag)
# get new range based on the lag and create the pivor table to be able to do the correlation
dateStart, dateEnd = self.__getLagDate(date, lagDays=lag)
dateRange_key = str(dateStart.date()) + " - " + str(dateEnd.date())
piv = pd.DataFrame(data.loc[dateRange_key]).unstack().T
piv.index = piv.index.droplevel()
piv = pd.DataFrame(piv).dropna(axis=0, how='all')
num_NaNs = int(piv.isnull().sum().sum())
samples = (piv.shape[0]*piv.shape[1])-num_NaNs
for col in piv.columns:
piv[col] = piv[col].astype('float64', errors = 'raise')
if(lag>0):
date = dateRange_key
# return piv
title = date + '_correlation'
# scaler = StandardScaler()
# pivScaled = scaler.fit_transform(piv)
# pivScaled = pd.DataFrame(pivScaled, columns=piv.columns)
# pivScaled.index = piv.index
# piv = pivScaled
if(log_transform):
piv[piv <= 0] = 0.00000001
piv = np.log10(piv)
sns.set_style("white", {"axes.facecolor": "0.95"})
g = sns.PairGrid(piv, aspect=1.2, diag_sharey=False, despine=False)
g.fig.suptitle(title, fontweight='bold', y=1.08, fontsize=25)
g.map_lower(sns.regplot, lowess=True, ci=False, line_kws={'color': 'red', 'lw': 3},
scatter_kws={'color': 'black', 's': 20})
g.map_diag(sns.distplot, kde_kws={'color': 'black', 'lw': 3}, hist_kws={'histtype': 'bar', 'lw': 2, 'edgecolor': 'k', 'facecolor':'grey'})
g.map_upper(self.__plotUpperHalf)
for ax in g.axes.flat:
ax.tick_params("y", labelrotation=0, labelsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize, fontweight='bold') #HERE
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize,fontweight='bold')
g.fig.subplots_adjust(wspace=0.3, hspace=0.3)
ax = plt.gca()
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.3, 3, 'Date: {}\n\nWells: {}\nSamples used: {}'.format(date, piv.shape[0] ,samples), transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
# Add titles to the diagonal axes/subplots
for ax, col in zip(np.diag(g.axes), piv.columns):
ax.set_title(col, y=0.82, fontsize=15)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
g.fig.savefig(save_dir + '/' + date + '.png', bbox_inches="tight")
if(returnData):
return piv
# Description:
# Plots the correlations with the physical plots as well as the correlations of the important analytes for ALL the wells in specified year.
# Parameters:
# year (int): year to be analyzed
# analytes (list of strings): list of analyte names to use
    # remove_outliers (bool): choose whether or not to remove the outliers.
# z_threshold (float): z_score threshold to eliminate outliers
# min_samples (int): minimum number of samples the result should contain in order to execute.
# save_dir (string): name of the directory you want to save the plot to
def plot_corr_by_year(self, year, analytes, remove_outliers=True, z_threshold=4, min_samples=10, save_dir='plot_corr_by_year', log_transform=False, fontsize=20, returnData=False):
data = self.data
query = data
query = self.simplify_data(data=query)
query.COLLECTION_DATE = pd.to_datetime(query.COLLECTION_DATE)
query = query[query.COLLECTION_DATE.dt.year == year]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
if(query.shape[0] == 0):
            return 'ERROR: {} has no data for any of the specified analytes.'.format(year)
samples = query[['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME']].duplicated().value_counts()[0]
if(samples < min_samples):
            return 'ERROR: {} does not have at least {} samples.'.format(year, min_samples)
else:
piv = query.reset_index().pivot_table(index = 'STATION_ID', columns='ANALYTE_NAME', values='RESULT',aggfunc=np.mean)
# return piv
# Remove outliers
if(remove_outliers):
piv = self.remove_outliers(piv, z_threshold=z_threshold)
samples = piv.shape[0] * piv.shape[1]
title = str(year) + '_correlation'
# scaler = StandardScaler()
# pivScaled = scaler.fit_transform(piv)
# pivScaled = pd.DataFrame(pivScaled, columns=piv.columns)
# pivScaled.index = piv.index
# piv = pivScaled
if(log_transform):
piv[piv <= 0] = 0.00000001
piv = np.log10(piv)
sns.set_style("white", {"axes.facecolor": "0.95"})
g = sns.PairGrid(piv, aspect=1.2, diag_sharey=False, despine=False)
g.fig.suptitle(title, fontweight='bold', y=1.08, fontsize=25)
g.map_lower(sns.regplot, lowess=True, ci=False, line_kws={'color': 'red', 'lw': 3},
scatter_kws={'color': 'black', 's': 20})
g.map_diag(sns.distplot, kde_kws={'color': 'black', 'lw': 3}, hist_kws={'histtype': 'bar', 'lw': 2, 'edgecolor': 'k', 'facecolor':'grey'})
g.map_upper(self.__plotUpperHalf)
for ax in g.axes.flat:
ax.tick_params("y", labelrotation=0, labelsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize, fontweight='bold') #HERE
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize,fontweight='bold')
g.fig.subplots_adjust(wspace=0.3, hspace=0.3)
ax = plt.gca()
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
            ax.text(1.3, 3, 'Year: {}\n\nSamples used: {}'.format(year, samples), transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
# Add titles to the diagonal axes/subplots
for ax, col in zip(np.diag(g.axes), piv.columns):
ax.set_title(col, y=0.82, fontsize=15)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
g.fig.savefig(save_dir + '/' + str(year) + '.png', bbox_inches="tight")
if(returnData):
return piv
# Description:
# Plots the linear regression line of data given the analyte_name and well_name. The plot includes the prediction where the line of best fit intersects with the Maximum Concentration Limit (MCL).
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
# year_interval (int): plot by how many years to appear in the axis e.g.(1 = every year, 5 = every 5 years, ...)
# save_dir (string): name of the directory you want to save the plot to
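    # Example (hypothetical usage; the well and analyte names are placeholders that must exist in the data):
    #     pylenm_df.plot_MCL(well_name='FSB 95DR', analyte_name='TRITIUM', year_interval=5)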
def plot_MCL(self, well_name, analyte_name, year_interval=5, save_dir='plot_MCL'):
data = self.data
# finds the intersection point of 2 lines given the slopes and y-intercepts
def line_intersect(m1, b1, m2, b2):
if m1 == m2:
                print('The lines are parallel')
return None
x = (b2 - b1) / (m1 - m2)
y = m1 * x + b1
return x,y
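        # Worked example: line_intersect(m1=2, b1=0, m2=0, b2=4) solves 2*x + 0 = 0*x + 4,
        # i.e. x = (4 - 0) / (2 - 0) = 2 and y = 2*2 + 0 = 4, so it returns (2.0, 4.0).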
# Gets appropriate data (well_name and analyte_name)
query = self.query_data(well_name, analyte_name)
if(type(query)==int and query == 0):
return 'No results found for {} and {}'.format(well_name, analyte_name)
else:
test = query.groupby(['COLLECTION_DATE']).mean()
test.index = pd.to_datetime(test.index)
x = date2num(test.index)
y = np.log10(test.RESULT)
ylabel = 'log-Concentration (' + self.get_unit(analyte_name) + ')'
y = y.rename(ylabel)
            p, cov = np.polyfit(x, y, 1, cov=True) # parameters and covariance of the 1-D polynomial fit
m_unc = np.sqrt(cov[0][0])
b_unc = np.sqrt(cov[1][1])
f = np.poly1d(p)
try:
MCL = self.get_MCL(analyte_name)
m1, b1 = f # line of best fit
m2, b2 = 0, MCL # MCL constant
intersection = line_intersect(m1, b1, m2, b2)
## Get confidence interval intersection points with MCL
data = list(zip(x,y))
n = len(data)
list_slopes = []
list_intercepts = []
random.seed(50)
for _ in range(80):
sampled_data = [ random.choice(data) for _ in range(n) ]
x_s, y_s = zip(*sampled_data)
x_s = np.array(x_s)
y_s = np.array(y_s)
m_s, b_s, r, p, err = scipy.stats.linregress(x_s,y_s)
ymodel = m_s*x_s + b_s
list_slopes.append(m_s)
list_intercepts.append(b_s)
max_index = list_slopes.index(max(list_slopes))
min_index = list_slopes.index(min(list_slopes))
intersection_left = line_intersect(list_slopes[min_index], list_intercepts[min_index], m2, b2)
intersection_right = line_intersect(list_slopes[max_index], list_intercepts[max_index], m2, b2)
##
fig, ax = plt.subplots(figsize=(10, 6))
ax.set_title(well_name + ' - ' + analyte_name, fontweight='bold')
ttl = ax.title
ttl.set_position([.5, 1.05])
years = mdates.YearLocator(year_interval) # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax = plt.gca()
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.autoscale_view()
ax.grid(True, alpha=0.4)
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
ax.set_xlabel('Years')
ax.set_ylabel('log-Concentration (' + self.get_unit(analyte_name) + ')')
if(intersection[0] < min(x)):
temp = intersection_left
intersection_left = intersection_right
intersection_right = temp
ax.set_ylim([0, max(y)+1])
ax.set_xlim([intersection_left[0]-1000, max(x)+1000])
elif(intersection[0] < max(x) and intersection[0] > min(x)):
ax.set_ylim([0, max(y)+1])
ax.set_xlim(min(x)-1000, max(x)+1000)
else:
ax.set_ylim([0, max(y)+1])
ax.set_xlim([min(x)-1000, intersection_right[0]+1000])
ax = sns.regplot(x, y, logx=True, truncate=False, seed=42, n_boot=1000, ci=95) # Line of best fit
ax.plot(x, y, ls='', marker='o', ms=5, color='black', alpha=1) # Data
ax.axhline(y=MCL, color='r', linestyle='--') # MCL
ax.plot(intersection[0], intersection[1], color='blue', marker='o', ms=10)
ax.plot(intersection_left[0], intersection_left[1], color='green', marker='o', ms=5)
ax.plot(intersection_right[0], intersection_right[1], color='green', marker='o', ms=5)
predict = num2date(intersection[0]).date()
l_predict = num2date(intersection_left[0]).date()
u_predict = num2date(intersection_right[0]).date()
ax.annotate(predict, (intersection[0], intersection[1]), xytext=(intersection[0], intersection[1]+1),
bbox=dict(boxstyle="round", alpha=0.1),ha='center', arrowprops=dict(arrowstyle="->", color='blue'), fontsize=small_fontSize, fontweight='bold')
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.1, 0.5, 'Lower confidence: {}\n Prediction: {}\nUpper confidence: {}'.format(l_predict, predict, u_predict), transform=ax.transAxes, fontsize=small_fontSize, fontweight='bold', verticalalignment='bottom', bbox=props)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(save_dir + '/' + well_name + '-' + analyte_name +'.png', bbox_inches="tight")
            except Exception as e:
                print('ERROR: Something went wrong:', e)
return None
# Description:
    #    Generates a PCA biplot (PCA score plot + loading plot) of the data given a date in the dataset. The data is also clustered into n_clusters.
# Parameters:
# date (string): date to be analyzed
# analytes (list of strings): list of analyte names to use
# lag (int): number of days to look ahead and behind the specified date (+/-)
# n_clusters (int): number of clusters to split the data into.
# filter (bool): Flag to indicate well filtering.
# col (string): column name from the construction dataset that you want to filter by
# equals (list of strings): value(s) to filter by in column col
# return_clusters (bool): Flag to return the cluster data to be used for spatial plotting.
# min_samples (int): minimum number of samples the result should contain in order to execute.
# show_labels (bool): choose whether or not to show the name of the wells.
# save_dir (string): name of the directory you want to save the plot to
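    # Example (hypothetical usage; the date string and analyte names are placeholders and must match
    # values present in COLLECTION_DATE and ANALYTE_NAME):
    #     pylenm_df.plot_PCA_by_date(date='2015-01-06', analytes=['TRITIUM', 'URANIUM-238'], n_clusters=4, min_samples=3)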
def plot_PCA_by_date(self, date, analytes, lag=0, n_clusters=4, return_clusters=False, min_samples=3, show_labels=True, save_dir='plot_PCA_by_date', filter=False, col=None, equals=[]):
if(lag==0):
data = self.data
data = self.simplify_data(data=data)
query = data[data.COLLECTION_DATE == date]
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(query.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                    return 'ERROR: No results for this query with the specified filter parameters.'
query = query[query['STATION_ID'].isin(intersect_wells)]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
if(query.shape[0] == 0):
                return 'ERROR: {} has no data for the specified analytes.'.format(date)
samples = query[['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME']].duplicated().value_counts()[0]
if(samples < min_samples):
return 'ERROR: {} does not have at least {} samples.'.format(date, min_samples)
# if(len(np.unique(query.ANALYTE_NAME.values)) < 6):
# return 'ERROR: {} has less than the 6 analytes we want to analyze.'.format(date)
else:
# analytes = self.__custom_analyte_sort(np.unique(query.ANALYTE_NAME.values))
analytes = sorted(analytes)
piv = query.reset_index().pivot_table(index = 'STATION_ID', columns='ANALYTE_NAME', values='RESULT',aggfunc=np.mean)
# return piv
else:
# If the data has already been calculated with the lag specified, retrieve it
if(self.jointData_is_set(lag=lag)==True):
data = self.__jointData[0]
# Otherwise, calculate it
else:
data = self.getJointData(analytes, lag=lag)
self.set_jointData(data=data, lag=lag)
            # get new range based on the lag and create the pivot table to be able to do the correlation
dateStart, dateEnd = self.__getLagDate(date, lagDays=lag)
dateRange_key = str(dateStart.date()) + " - " + str(dateEnd.date())
piv = pd.DataFrame(data.loc[dateRange_key]).unstack().T
piv.index = piv.index.droplevel()
piv = pd.DataFrame(piv).dropna(axis=0, how='all')
num_NaNs = int(piv.isnull().sum().sum())
samples = (piv.shape[0]*piv.shape[1])-num_NaNs
for col in piv.columns:
piv[col] = piv[col].astype('float64', errors = 'raise')
if(lag>0):
date = dateRange_key
# return piv
main_data = piv.dropna()
scaler = StandardScaler()
X = scaler.fit_transform(main_data)
pca = PCA(n_components=2)
x_new = pca.fit_transform(X)
pca_points = pd.DataFrame(x_new, columns=["x1", "x2"])
k_Means = KMeans(n_clusters=n_clusters, random_state=42)
model = k_Means.fit(pca_points[['x1', 'x2']])
predict = model.predict(pca_points[['x1', 'x2']])
# attach predicted cluster to original points
pca_points['predicted'] = model.labels_
# Create a dataframe for cluster_centers (centroids)
centroids = pd.DataFrame(model.cluster_centers_, columns=["x1", "x2"])
colors = ['red', 'blue', 'orange', 'purple', 'green', 'beige', 'pink', 'black', 'cadetblue', 'lightgreen']
pca_points['color'] = pca_points['predicted'].map(lambda p: colors[p])
fig, ax = plt.subplots(figsize=(10,10))
ax = plt.axes()
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
def myplot(score,coeff,labels=None,c='r', centroids=None):
xs = score.iloc[:,0]
ys = score.iloc[:,1]
n = coeff.shape[0]
scalex = 1.0/(xs.max() - xs.min())
scaley = 1.0/(ys.max() - ys.min())
scatt_X = xs * scalex
scatt_Y = ys * scaley
scatter = plt.scatter(scatt_X, scatt_Y, alpha=0.8, label='Wells', c=c)
centers = plt.scatter(centroids.iloc[:,0]* scalex, centroids.iloc[:,1]* scaley,
c = colors[0:n_clusters],
marker='X', s=550)
for i in range(n):
arrow = plt.arrow(0, 0, coeff[i,0], coeff[i,1], color = 'r', alpha = 0.9, head_width=0.05, head_length=0.05, label='Loadings')
if labels is None:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
else:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'bottom')
if(show_labels):
for x_pos, y_pos, label in zip(scatt_X, scatt_Y, main_data.index):
ax.annotate(label, # The label for this point
xy=(x_pos, y_pos), # Position of the corresponding point
xytext=(7, 0), # Offset text by 7 points to the right
textcoords='offset points', # tell it to use offset points
ha='left', # Horizontally aligned to the left
va='center', # Vertical alignment is centered
color='black', alpha=0.8)
plt.legend( [scatter, centers, arrow], ['Wells', 'Well centroids','Loadings'])
samples = x_new.shape[0]*piv.shape[1]
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.1, 0.5, 'Date: {}\n\nSamples: {}\nWells: {}'.format(date, samples, x_new.shape[0]),
transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
ax.set_title('PCA Biplot - ' + date, fontweight='bold')
plt.grid(alpha=0.5)
#Call the function. Use only the 2 PCs.
myplot(pca_points,np.transpose(pca.components_[0:2, :]), labels=piv.columns, c=pca_points['color'], centroids=centroids)
plt.show()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_dir + '/' + 'PCA Biplot - '+ date +'.png', bbox_inches="tight")
if(return_clusters):
stations = list(main_data.index)
color_wells = list(pca_points.color)
def merge(list1, list2):
merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]
return merged_list
color_df = pd.DataFrame(merge(stations, color_wells), columns=['STATION_ID', 'color'])
            if(self.get_Construction_Data() is None):
                print('You need to set the GPS data first using the setConstructionData function.')
return None
else:
gps_color = pd.merge(self.get_Construction_Data(), color_df, on=['STATION_ID'])
return gps_color
# Description:
    #    Generates a PCA biplot (PCA score plot + loading plot) of the data given a year in the dataset. The data is also clustered into n_clusters.
# Parameters:
# year (int): year to be analyzed
# analytes (list of strings): list of analyte names to use
# n_clusters (int): number of clusters to split the data into.
# filter (bool): Flag to indicate well filtering.
# col (string): column name from the construction dataset that you want to filter by
# equals (list of strings): value(s) to filter by in column col
# return_clusters (bool): Flag to return the cluster data to be used for spatial plotting.
# min_samples (int): minimum number of samples the result should contain in order to execute.
# show_labels (bool): choose whether or not to show the name of the wells.
# save_dir (string): name of the directory you want to save the plot to
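    # Example (hypothetical usage; analyte names are placeholders that must exist in the data):
    #     pylenm_df.plot_PCA_by_year(year=2016, analytes=['TRITIUM', 'URANIUM-238'], n_clusters=4, return_clusters=False)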
def plot_PCA_by_year(self, year, analytes, n_clusters=4, return_clusters=False, min_samples=10, show_labels=True, save_dir='plot_PCA_by_year', filter=False, col=None, equals=[]):
data = self.data
query = self.simplify_data(data=data)
query.COLLECTION_DATE = pd.to_datetime(query.COLLECTION_DATE)
query = query[query.COLLECTION_DATE.dt.year == year]
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(query.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
                return 'ERROR: No results for this query with the specified filter parameters.'
query = query[query['STATION_ID'].isin(intersect_wells)]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
if(query.shape[0] == 0):
            return 'ERROR: {} has no data for the specified analytes.'.format(year)
samples = query[['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME']].duplicated().value_counts()[0]
if(samples < min_samples):
return 'ERROR: {} does not have at least {} samples.'.format(year, min_samples)
# if(len(np.unique(query.ANALYTE_NAME.values)) < 6):
# return 'ERROR: {} has less than the 6 analytes we want to analyze.'.format(year)
else:
# analytes = self.__custom_analyte_sort(np.unique(query.ANALYTE_NAME.values))
analytes = sorted(analytes)
piv = query.reset_index().pivot_table(index = 'STATION_ID', columns='ANALYTE_NAME', values='RESULT',aggfunc=np.mean)
main_data = piv.dropna()
# # FILTERING CODE
# if(filter):
# res_wells = self.filter_wells(filter_well_by)
# main_data = main_data.loc[main_data.index.isin(res_wells)]
scaler = StandardScaler()
X = scaler.fit_transform(main_data)
pca = PCA(n_components=2)
x_new = pca.fit_transform(X)
pca_points = pd.DataFrame(x_new, columns=["x1", "x2"])
k_Means = KMeans(n_clusters=n_clusters, random_state=42)
model = k_Means.fit(pca_points[['x1', 'x2']])
predict = model.predict(pca_points[['x1', 'x2']])
# attach predicted cluster to original points
pca_points['predicted'] = model.labels_
# Create a dataframe for cluster_centers (centroids)
centroids = pd.DataFrame(model.cluster_centers_, columns=["x1", "x2"])
colors = ['red', 'blue', 'orange', 'purple', 'green', 'beige', 'pink', 'black', 'cadetblue', 'lightgreen']
pca_points['color'] = pca_points['predicted'].map(lambda p: colors[p])
fig, ax = plt.subplots(figsize=(15,15))
ax = plt.axes()
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
def myplot(score,coeff,labels=None,c='r', centroids=None):
xs = score[:,0]
ys = score[:,1]
n = coeff.shape[0]
scalex = 1.0/(xs.max() - xs.min())
scaley = 1.0/(ys.max() - ys.min())
scatt_X = xs * scalex
scatt_Y = ys * scaley
scatter = plt.scatter(scatt_X, scatt_Y, alpha=0.8, label='Wells', c=c)
centers = plt.scatter(centroids.iloc[:,0]* scalex, centroids.iloc[:,1]* scaley,
c = colors[0:n_clusters],
marker='X', s=550)
for i in range(n):
arrow = plt.arrow(0, 0, coeff[i,0], coeff[i,1], color = 'r', alpha = 0.9, head_width=0.05, head_length=0.05)
if labels is None:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
else:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'bottom')
if(show_labels):
for x_pos, y_pos, label in zip(scatt_X, scatt_Y, main_data.index):
ax.annotate(label, # The label for this point
xy=(x_pos, y_pos), # Position of the corresponding point
xytext=(7, 0), # Offset text by 7 points to the right
textcoords='offset points', # tell it to use offset points
ha='left', # Horizontally aligned to the left
va='center', color='black', alpha=0.8) # Vertical alignment is centered
plt.legend( [scatter, centers, arrow], ['Wells', 'Well centroids','Loadings'])
samples = x_new.shape[0]*piv.shape[1]
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
        ax.text(1.1, 0.5, 'Year: {}\n\nSamples: {}\nWells: {}'.format(year, samples, x_new.shape[0]),
transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
ax.set_title('PCA Biplot - ' + str(year), fontweight='bold')
plt.grid(alpha=0.5)
#Call the function. Use only the 2 PCs.
myplot(x_new[:,0:2],np.transpose(pca.components_[0:2, :]), labels=piv.columns, c=pca_points['color'], centroids=centroids)
plt.show()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_dir + '/' + 'PCA Biplot - '+ str(year) +'.png', bbox_inches="tight")
if(return_clusters):
stations = list(main_data.index)
color_wells = list(pca_points.color)
def merge(list1, list2):
merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]
return merged_list
color_df = pd.DataFrame(merge(stations, color_wells), columns=['STATION_ID', 'color'])
            if(self.get_Construction_Data() is None):
print('You need to set the GPS data first using the setConstructionData function.')
return None
else:
gps_color = pd.merge(self.get_Construction_Data(), color_df, on=['STATION_ID'])
return gps_color
# Description:
    #    Generates a PCA biplot (PCA score plot + loading plot) of the data given a well_name in the dataset, using the specified analytes.
# Parameters:
    #    well_name (string): name of the well to be processed
    #    analytes (list of strings): list of analyte names to use
    #    interpolate (bool): choose whether or not to interpolate the data
    #    frequency (string): {‘D’, ‘W’, ‘M’, ‘Y’} frequency to interpolate. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html for valid frequency inputs. (e.g. ‘W’ = every week, ‘D’ = every day, ‘2W’ = every 2 weeks)
# min_samples (int): minimum number of samples the result should contain in order to execute.
# show_labels (bool): choose whether or not to show the name of the wells.
# save_dir (string): name of the directory you want to save the plot to
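    # Example (hypothetical usage; the well and analyte names are placeholders):
    #     pylenm_df.plot_PCA_by_well(well_name='FSB 95DR', analytes=['TRITIUM', 'URANIUM-238'], interpolate=True, frequency='2W')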
def plot_PCA_by_well(self, well_name, analytes, interpolate=False, frequency='2W', min_samples=10, show_labels=True, save_dir='plot_PCA_by_well'):
data = self.data
query = data[data.STATION_ID == well_name]
a = list(np.unique(query.ANALYTE_NAME.values))# get all analytes from dataset
for value in analytes:
if((value in a)==False):
return 'ERROR: No analyte named "{}" in data.'.format(value)
analytes = sorted(analytes)
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
x = query[['COLLECTION_DATE', 'ANALYTE_NAME']]
unique = ~x.duplicated()
query = query[unique]
piv = query.reset_index().pivot(index='COLLECTION_DATE',columns='ANALYTE_NAME', values='RESULT')
piv = piv[analytes]
piv.index = pd.to_datetime(piv.index)
totalSamples = piv.stack().shape[0]
piv = piv.dropna()
if(interpolate):
piv = self.interpolate_well_data(well_name, analytes, frequency=frequency)
title = 'PCA Biplot - ' + well_name + ' - interpolated every ' + frequency
else:
title = 'PCA Biplot - ' + well_name
if(query.shape[0] == 0):
            return 'ERROR: {} has no data for the specified analytes.'.format(well_name)
samples = query[['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME']].duplicated().value_counts()[0]
if(samples < min_samples):
            return 'ERROR: {} does not have at least {} samples.'.format(well_name, min_samples)
# if(len(np.unique(query.ANALYTE_NAME.values)) < 6):
# return 'ERROR: {} has less than the 6 analytes we want to analyze.'.format(well_name)
else:
scaler = StandardScaler()
X = scaler.fit_transform(piv.dropna())
pca = PCA(n_components=2)
x_new = pca.fit_transform(X)
fig, ax = plt.subplots(figsize=(15,15))
ax = plt.axes()
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
def myplot(score,coeff,labels=None):
xs = score[:,0]
ys = score[:,1]
n = coeff.shape[0]
scalex = 1.0/(xs.max() - xs.min())
scaley = 1.0/(ys.max() - ys.min())
scatt_X = xs * scalex
scatt_Y = ys * scaley
scatter = plt.scatter(scatt_X, scatt_Y, alpha=0.8, label='Date samples')
for i in range(n):
arrow = plt.arrow(0, 0, coeff[i,0], coeff[i,1], color = 'r', alpha = 0.9, head_width=0.05, head_length=0.05, label='Loadings')
if labels is None:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
else:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'bottom')
if(show_labels):
for x_pos, y_pos, label in zip(scatt_X, scatt_Y, piv.dropna().index.date):
ax.annotate(label, # The label for this point
xy=(x_pos, y_pos), # Position of the corresponding point
xytext=(7, 0), # Offset text by 7 points to the right
textcoords='offset points', # tell it to use offset points
ha='left', # Horizontally aligned to the left
va='center', # Vertical alignment is centered
color='black', alpha=0.8)
plt.legend( [scatter, arrow], ['Date samples', 'Loadings'])
samples = x_new.shape[0]*piv.shape[1]
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.1, 0.5, 'Start date: {}\nEnd date: {}\n\nOriginal samples: {}\nSamples used: {}\nDate samples: {}'
.format(piv.index[0].date(), piv.index[-1].date(), totalSamples, samples, x_new.shape[0]),
transform=ax.transAxes, fontsize=20, fontweight='bold', verticalalignment='bottom', bbox=props)
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
ax.set_title(title, fontweight='bold')
plt.grid(alpha=0.5)
#Call the function. Use only the 2 PCs.
myplot(x_new[:,0:2],np.transpose(pca.components_[0:2, :]), labels=piv.columns)
plt.show()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_dir + '/' + title +'.png', bbox_inches="tight")
# Description:
# Plots the well locations on an interactive map given coordinates.
# Parameters:
    #    gps_data (dataframe): Data frame with the following column names: STATION_ID, LATITUDE, LONGITUDE, color. If the color column is not passed, the default color will be blue.
# center (list with 2 floats): latitude and longitude coordinates to center the map view.
# zoom (int): value to determine the initial scale of the map
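    # Example (hypothetical usage; `gps_color` is a dataframe with columns STATION_ID, LATITUDE,
    # LONGITUDE and color, e.g. the frame returned by plot_PCA_by_date/plot_PCA_by_year with
    # return_clusters=True):
    #     m = pylenm_df.plot_coordinates_to_map(gps_color)
    #     m  # display the interactive map in a notebook cell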
def plot_coordinates_to_map(self, gps_data, center=[33.271459, -81.675873], zoom=14):
m = Map(basemap=basemaps.Esri.WorldImagery, center=center, zoom=zoom)
m.add_control(FullScreenControl())
for (index,row) in gps_data.iterrows():
if('color' in gps_data.columns):
icon = AwesomeIcon(
name='tint',
marker_color=row.loc['color'],
icon_color='black',
spin=False
)
else:
icon = AwesomeIcon(
name='tint',
marker_color='blue',
icon_color='black',
spin=False
)
loc = [row.loc['LATITUDE'],row.loc['LONGITUDE']]
station = HTML(value=row.loc['STATION_ID'])
marker = Marker(location=loc,
icon=icon,
draggable=False,
)
m.add_layer(marker)
popup = Popup(child=station,
max_height=1)
marker.popup = popup
return m
# Description:
# Resamples analyte data based on the frequency specified and interpolates the values in between. NaN values are replaced with the average value per well.
# Parameters:
# analyte (string): analyte name for interpolation of all present wells.
    #    frequency (string): {‘D’, ‘W’, ‘M’, ‘Y’} frequency to interpolate. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html for valid frequency inputs. (e.g. ‘W’ = every week, ‘D’ = every day, ‘2W’ = every 2 weeks)
# rm_outliers (bool): flag to remove outliers in the data
# z_threshold (int): z_score threshold to eliminate outliers
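    # Example (hypothetical usage; the analyte name is a placeholder that must exist in the data):
    #     tritium_interp = pylenm_df.interpolate_wells_by_analyte(analyte='TRITIUM', frequency='2W', rm_outliers=True)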
def interpolate_wells_by_analyte(self, analyte, frequency='2W', rm_outliers=True, z_threshold=3):
data = self.data
df_t, dates = self.transform_time_series(
analytes=[analyte],
resample=frequency,
rm_outliers=True,
z_threshold=z_threshold)
res_interp = self.get_individual_analyte_df(data=df_t, dates=dates, analyte=analyte)
res_interp = res_interp.dropna(axis=1, how='all')
return res_interp
# IN THE WORKS
def transform_time_series(self, analytes=[], resample='2W', rm_outliers=False, z_threshold=4):
data = self.data
def transform_time_series_by_analyte(data, analyte_name):
wells_analyte = np.unique(data[data.ANALYTE_NAME == analyte_name].STATION_ID)
condensed = data[data.ANALYTE_NAME == analyte_name].groupby(['STATION_ID','COLLECTION_DATE']).mean()
analyte_df_resample = pd.DataFrame(index=wells_analyte, columns=t)
analyte_df_resample.sort_index(inplace=True)
for well in wells_analyte:
for date in condensed.loc[well].index:
analyte_df_resample.at[well, date] = condensed.loc[well,date].RESULT
analyte_df_resample = analyte_df_resample.astype('float').T
analyte_df_resample = analyte_df_resample.interpolate(method='linear')
return analyte_df_resample
all_dates = np.unique(data.COLLECTION_DATE)
# Create array of equally spaced dates
start = pd.Timestamp(all_dates.min())
end = pd.Timestamp(all_dates.max())
delta = end - start
t = np.linspace(start.value, end.value, delta.days)
t =
|
pd.to_datetime(t)
|
pandas.to_datetime
|
from __future__ import annotations
import abc
import inspect
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
Iterator,
List,
cast,
)
import warnings
import numpy as np
from pandas._config import option_context
from pandas._libs import lib
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
FrameOrSeriesUnion,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCNDFrame,
ABCSeries,
)
from pandas.core.algorithms import safe_sort
from pandas.core.base import (
DataError,
SelectionMixin,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
create_series_with_explicit_dtype,
)
if TYPE_CHECKING:
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.groupby import (
DataFrameGroupBy,
SeriesGroupBy,
)
from pandas.core.resample import Resampler
from pandas.core.window.rolling import BaseWindow
ResType = Dict[int, Any]
def frame_apply(
obj: DataFrame,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type: str | None = None,
args=None,
kwargs=None,
) -> FrameApply:
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
klass: type[FrameApply]
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
args=args,
kwargs=kwargs,
)
class Apply(metaclass=abc.ABCMeta):
axis: int
def __init__(
self,
obj: AggObjType,
func,
raw: bool,
result_type: str | None,
args,
kwargs,
):
self.obj = obj
self.raw = raw
self.args = args or ()
self.kwargs = kwargs or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (
(kwargs or args)
and not isinstance(func, (np.ufunc, str))
and not is_list_like(func)
):
def f(x):
return func(x, *args, **kwargs)
else:
f = func
self.orig_f: AggFuncType = func
self.f: AggFuncType = f
@property
def index(self) -> Index:
return self.obj.index
@property
def agg_axis(self) -> Index:
return self.obj._get_agg_axis(self.axis)
@abc.abstractmethod
def apply(self) -> FrameOrSeriesUnion:
pass
def agg(self) -> FrameOrSeriesUnion | None:
"""
Provide an implementation for the aggregators.
Returns
-------
Result of aggregation, or None if agg cannot be performed by
this method.
"""
obj = self.obj
arg = self.f
args = self.args
kwargs = self.kwargs
result = self.maybe_apply_str()
if result is not None:
return result
if is_dict_like(arg):
return self.agg_dict_like()
elif is_list_like(arg):
# we require a list, but not a 'str'
return self.agg_list_like()
if callable(arg):
f = com.get_cython_func(arg)
if f and not args and not kwargs:
return getattr(obj, f)()
# caller can react
return None
def transform(self) -> FrameOrSeriesUnion:
"""
Transform a DataFrame or Series.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
obj = self.obj
func = self.orig_f
axis = self.axis
args = self.args
kwargs = self.kwargs
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return obj.T.transform(func, 0, *args, **kwargs).T
if is_list_like(func) and not is_dict_like(func):
func = cast(List[AggFuncTypeBase], func)
# Convert func equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if is_dict_like(func):
func = cast(AggFuncTypeDict, func)
return self.transform_dict_like(func)
# func is either str or callable
func = cast(AggFuncTypeBase, func)
try:
result = self.transform_str_or_callable(func)
except TypeError:
raise
except Exception as err:
raise ValueError("Transform function failed") from err
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if (
isinstance(result, (ABCSeries, ABCDataFrame))
and result.empty
and not obj.empty
):
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
def transform_dict_like(self, func):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
obj = self.obj
args = self.args
kwargs = self.kwargs
# transform is currently only for Series/DataFrame
assert isinstance(obj, ABCNDFrame)
if len(func) == 0:
raise ValueError("No transform functions were provided")
func = self.normalize_dictlike_arg("transform", obj, func)
results: dict[Hashable, FrameOrSeriesUnion] = {}
failed_names = []
all_type_errors = True
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = colg.transform(how, 0, *args, **kwargs)
except Exception as err:
if str(err) in {
"Function did not transform",
"No transform functions were provided",
}:
raise err
elif not isinstance(err, TypeError):
all_type_errors = False
failed_names.append(name)
# combine results
if not results:
klass = TypeError if all_type_errors else ValueError
raise klass("Transform function failed")
if len(failed_names) > 0:
warnings.warn(
f"{failed_names} did not transform successfully and did not raise "
f"a TypeError. If any error is raised except for TypeError, "
f"this will raise in a future version of pandas. "
f"Drop these columns/ops to avoid this warning.",
FutureWarning,
stacklevel=4,
)
return concat(results, axis=1)
def transform_str_or_callable(self, func) -> FrameOrSeriesUnion:
"""
Compute transform in the case of a string or callable func
"""
obj = self.obj
args = self.args
kwargs = self.kwargs
if isinstance(func, str):
return self._try_aggregate_string_function(obj, func, *args, **kwargs)
if not args and not kwargs:
f = com.get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
def agg_list_like(self) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a list-like argument.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(List[AggFuncTypeBase], self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
elif obj._selected_obj.ndim == 1:
selected_obj = obj._selected_obj
else:
selected_obj = obj._obj_with_exclusions
results = []
keys = []
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
try:
new_res = colg.aggregate(a)
except TypeError:
pass
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
try:
new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
pass
elif "no results" in str(err):
# raised directly in _aggregate_multiple_funcs
pass
else:
raise
else:
results.append(new_res)
keys.append(col)
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=obj.name)
if is_nested_object(result):
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result
def agg_dict_like(self) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a dict-like argument.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(AggFuncTypeDict, self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
selection = None
else:
selected_obj = obj._selected_obj
selection = obj._selection
arg = self.normalize_dictlike_arg("agg", selected_obj, arg)
if selected_obj.ndim == 1:
# key only used for output
colg = obj._gotitem(selection, ndim=1)
results = {key: colg.agg(how) for key, how in arg.items()}
else:
# key used for column selection and output
results = {
key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()
}
# set the final keys
keys = list(arg.keys())
# Avoid making two isinstance calls in all and any below
is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
# combine results
if all(is_ndframe):
keys_to_use = [k for k in keys if not results[k].empty]
# Have to check, if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
axis = 0 if isinstance(obj, ABCSeries) else 1
result = concat({k: results[k] for k in keys_to_use}, axis=axis)
elif any(is_ndframe):
# There is a mix of NDFrames and scalars
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
else:
from pandas import Series
# we have a dict of scalars
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
name = obj.name
else:
name = None
result = Series(results, name=name)
return result
def maybe_apply_str(self) -> FrameOrSeriesUnion | None:
"""
Compute apply in case of a string.
Returns
-------
result: Series, DataFrame, or None
Result when self.f is a string, None otherwise.
"""
f = self.f
if not isinstance(f, str):
return None
obj = self.obj
# TODO: GH 39993 - Avoid special-casing by replacing with lambda
if f == "size" and isinstance(obj, ABCDataFrame):
# Special-cased because DataFrame.size returns a single scalar
value = obj.shape[self.axis]
return obj._constructor_sliced(value, index=self.agg_axis, name="size")
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(obj, f, None)
if callable(func):
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
elif self.axis != 0:
raise ValueError(f"Operation {f} does not support axis=1")
return self._try_aggregate_string_function(obj, f, *self.args, **self.kwargs)
def maybe_apply_multiple(self) -> FrameOrSeriesUnion | None:
"""
Compute apply in case of a list-like or dict-like.
Returns
-------
result: Series, DataFrame, or None
Result when self.f is a list-like or dict-like, None otherwise.
"""
# Note: dict-likes are list-like
if not is_list_like(self.f):
return None
return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwargs)
def normalize_dictlike_arg(
self, how: str, obj: FrameOrSeriesUnion, func: AggFuncTypeDict
) -> AggFuncTypeDict:
"""
Handler for dict-like argument.
Ensures that necessary columns exist if obj is a DataFrame, and
that a nested renamer is not passed. Also normalizes to all lists
when values consists of a mix of list and non-lists.
"""
assert how in ("apply", "agg", "transform")
# Can't use func.values(); wouldn't work for a Series
if (
how == "agg"
and isinstance(obj, ABCSeries)
and any(is_list_like(v) for _, v in func.items())
) or (any(is_dict_like(v) for _, v in func.items())):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
if obj.ndim != 1:
# Check for missing columns on a frame
cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
cols_sorted = list(safe_sort(list(cols)))
raise KeyError(f"Column(s) {cols_sorted} do not exist")
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
# Cannot use func.values() because arg may be a Series
if any(is_aggregator(x) for _, x in func.items()):
new_func: AggFuncTypeDict = {}
for k, v in func.items():
if not is_aggregator(v):
# mypy can't realize v is not a list here
new_func[k] = [v] # type:ignore[list-item]
else:
new_func[k] = v
func = new_func
return func
def _try_aggregate_string_function(self, obj, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(obj, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
return f
f = getattr(np, arg, None)
if f is not None and hasattr(obj, "__array__"):
# in particular exclude Window
return f(obj, *args, **kwargs)
raise AttributeError(
f"'{arg}' is not a valid function for '{type(obj).__name__}' object"
)
class FrameApply(Apply):
obj: DataFrame
# ---------------------------------------------------------------
# Abstract Methods
@property
@abc.abstractmethod
def result_index(self) -> Index:
pass
@property
@abc.abstractmethod
def result_columns(self) -> Index:
pass
@property
@abc.abstractmethod
def series_generator(self) -> Iterator[Series]:
pass
@abc.abstractmethod
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
pass
# ---------------------------------------------------------------
@property
def res_columns(self) -> Index:
return self.result_columns
@property
def columns(self) -> Index:
return self.obj.columns
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self) -> Series:
return self.obj.dtypes
def apply(self) -> FrameOrSeriesUnion:
""" compute the results """
# dispatch to agg
result = self.maybe_apply_multiple()
if result is not None:
return result
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
result = self.maybe_apply_str()
if result is not None:
return result
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._mgr.apply("apply", func=self.f)
# _constructor will retain self.index and self.columns
return self.obj._constructor(data=results)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast(self.obj)
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw:
return self.apply_raw()
return self.apply_standard()
def agg(self):
obj = self.obj
axis = self.axis
if axis == 1:
result = FrameRowApply(
obj.T,
self.orig_f,
self.raw,
self.result_type,
self.args,
self.kwargs,
).agg()
result = result.T if result is not None else result
else:
result = super().agg()
if result is None:
result = obj.apply(self.orig_f, axis, args=self.args, **self.kwargs)
return result
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
assert callable(self.f)
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(
|
Series([], dtype=np.float64)
|
pandas.Series
|
# Preprocessing
import os, matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 50)
import numpy as np
import xgboost as xgb
import xgbfir
import pdb
import time
np.random.seed(1337)
def client_anaylsis():
"""
    The idea here is to unify the client IDs of several different customers into broader categories.
"""
# clean duplicate spaces in client names
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client_df["NombreCliente"] = client_df["NombreCliente"].str.lower()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
special_list = ["^(yepas)\s.*", "^(oxxo)\s.*", "^(bodega\scomercial)\s.*", "^(bodega\saurrera)\s.*", "^(bodega)\s.*",
"^(woolwort|woolworth)\s.*", "^(zona\sexpress)\s.*",
"^(zacatecana)\s.*", "^(yza)\s.*",
"^(yanet)\s.*", "^(yak)\s.*",
"^(wings)\s.*", "^(wendy)\s.*", "^(walmart\ssuper)\s?.*", "^(waldos)\s.*",
"^(wal\smart)\s.*", "^(vulcanizadora)\s.*", "^(viveres\sy\sservicios)\s.*",
"^(vips)\s.*", "^(vinos\sy\slicores)\s.*", "^(tienda\ssuper\sprecio)\s.*",
"^(vinos\sy\sabarrotes)\s.*", "^(vinateria)\s.*", "^(video\sjuegos)\s.*", "^(universidad)\s.*",
"^(tiendas\stres\sb)\s.*", "^(toks)\s.*","^(tkt\ssix)\s.*",
"^(torteria)\s.*", "^(tortas)\s.*", "^(super\sbara)\s.*",
"^(tiendas\sde\ssuper\sprecio)\s.*", "^(ultramarinos)\s.*", "^(tortilleria)\s.*",
"^(tienda\sde\sservicio)\s.*", "^(super\sx)\s.*", "^(super\swillys)\s.*",
"^(super\ssanchez)\s.*", "^(super\sneto)\s.*", "^(super\skompras)\s.*",
"^(super\skiosco)\s.*", "^(super\sfarmacia)\s.*", "^(super\scarnes)\s.*",
"^(super\scarniceria)\s.*", "^(soriana)\s.*", "^(super\scenter)\s.*",
"^(solo\sun\sprecio)\s.*", "^(super\scity)\s.*", "^(super\sg)\s.*", "^(super\smercado)\s.*",
"^(sdn)\s.*", "^(sams\sclub)\s.*", "^(papeleria)\s.*", "^(multicinemas)\s.*",
"^(mz)\s.*", "^(motel)\s.*", "^(minisuper)\s.*", "^(mini\stienda)\s.*",
"^(mini\ssuper)\s.*", "^(mini\smarket)\s.*", "^(mini\sabarrotes)\s.*", "^(mi\sbodega)\s.*",
"^(merza|merzapack)\s.*", "^(mercado\ssoriana)\s.*", "^(mega\scomercial)\s.*",
"^(mc\sdonalds)\s.*", "^(mb)\s[^ex].*", "^(maquina\sfma)\s.*", "^(ley\sexpress)\s.*",
"^(lavamatica)\s.*", "^(kiosko)\s.*", "^(kesos\sy\skosas)\s.*", "^(issste)\s.*",
"^(hot\sdogs\sy\shamburguesas|)\s.*", "^(hamburguesas\sy\shot\sdogs)\s.*", "(hot\sdog)",
"^(hospital)\s.*", "^(hiper\ssoriana)\s.*", "^(super\sahorros)\s.*", "^(super\sabarrotes)\s.*",
"^(hambuerguesas|hamburguesas|hamburgesas)\s.*", "^(gran\sbodega)\s.*",
"^(gran\sd)\s.*", "^(go\smart)\s.*", "^(gasolinera)\s.*", "^(fundacion)\s.*",
"^(fruteria)\s.*", "^(frutas\sy\sverduras)\s.*", "^(frutas\sy\slegumbres)\s.*",
"^(frutas\sy\sabarrotes)\s.*", "^(fma)\s.*", "^(fiesta\sinn)\s.*", "^(ferreteria)\s.*",
"^(farmacon)\s.*", "^(farmacias)\s.*", "^(farmacia\syza)\s.*",
"^(farmacia\smoderna)\s.*", "^(farmacia\slopez)\s.*",
"^(farmacia\sissste)\s.*", "^(farmacia\sisseg)\s.*", "^(farmacia\sguadalajara)\s.*",
"^(farmacia\sesquivar)\s.*", "^(farmacia\scalderon)\s.*", "^(farmacia\sbenavides)\s.*",
"^(farmacia\sabc)\s.*", "^(farmacia)\s.*", "^(farm\sguadalajara)\s.*",
"^(facultad\sde)\s.*", "^(f\sgdl)\s.*", "^(expendio)\s.*", "^(expendio\sde\span)\s.*",
"^(expendio\sde\shuevo)\s.*", "^(expendio\sbimbo)\s.*", "^(expendedoras\sautomaticas)\s.*",
"^(estic)\s.*", "^(estancia\sinfantil)\s.*", "^(estacionamiento)\s.*", "^(estanquillo)\s.*",
"^(estacion\sde\sservicio)\s.*", "^(establecimientos?)\s.*",
"^(escuela\suniversidad|esc\suniversidad)\s.*", "^(escuela\stelesecundaria|esc\stelesecundaria)\s.*",
"^(escuela\stecnica|esc\stecnica)\s.*",
"^(escuela\ssuperior|esc\ssuperior)\s.*", "^(escuela\ssecundaria\stecnica|esc\ssecundaria\stecnica)\s.*",
"^(escuela\ssecundaria\sgeneral|esc\ssecundaria\sgeneral)\s.*",
"^(escuela\ssecundaria\sfederal|esc\ssecundaria\sfederal)\s.*",
"^(escuela\ssecundaria|esc\ssecundaria)\s.*", "^(escuela\sprimaria|esc\sprimaria)\s.*",
"^(escuela\spreparatoria|esc\spreparatoria)\s.*", "^(escuela\snormal|esc\snormal)\s.*",
"^(escuela\sinstituto|esc\sinstituto)\s.*", "^(esc\sprepa|esc\sprep)\s.*",
"^(escuela\scolegio|esc\scolegio)\s.*", "^(escuela|esc)\s.*", "^(dunosusa)\s.*",
"^(ferreteria)\s.*", "^(dulces)\s.*", "^(dulceria)\s.*", "^(dulce)\s.*", "^(distribuidora)\s.*",
"^(diconsa)\s.*", "^(deposito)\s.*", "^(del\srio)\s.*", "^(cyber)\s.*", "^(cremeria)\s.*",
"^(cosina\seconomica)\s.*", "^(copy).*", "^(consumo|consumos)\s.*","^(conalep)\s.*",
"^(comercializadora)\s.*", "^(comercial\ssuper\salianza)\s.*",
"^(comercial\smexicana)\s.*", "^(comedor)\s.*", "^(colegio\sde\sbachilleres)\s.*",
"^(colegio)\s.*", "^(coffe).*", "^(cocteleria|cockteleria)\s.*", "^(cocina\seconomica)\s.*",
"^(cocina)\s.*", "^(cobaev)\s.*", "^(cobaes)\s.*", "^(cobaeh)\s.*", "^(cobach)\s.*",
"^(club\sde\sgolf)\s.*", "^(club\scampestre)\s.*", "^(city\sclub)\s.*", "^(circulo\sk)\s.*",
"^(cinepolis)\s.*", "^(cinemex)\s.*", "^(cinemas)\s.*", "^(cinemark)\s.*", "^(ciber)\s.*",
"^(church|churchs)\s.*", "^(chilis)\s.*", "^(chiles\sy\ssemillas)\s.*", "^(chiles\ssecos)\s.*",
"^(chedraui)\s.*", "^(cetis)\s.*", "^(cervefrio)\s.*", "^(cervefiesta)\s.*",
"^(cerveceria)\s.*", "^(cervecentro)\s.*", "^(centro\sescolar)\s.*", "^(centro\seducativo)\s.*",
"^(centro\sde\sestudios)\s.*", "^(centro\scomercial)\s.*", "^(central\sde\sautobuses)\s.*",
"^(cecytem)\s.*", "^(cecytec)\s.*", "^(cecyte)\s.*", "^(cbtis)\s.*", "^(cbta)\s.*", "^(cbt)\s.*",
"^(caseta\stelefonica)\s.*", "^(caseta)\s.*", "^(casa\sley)\s.*", "^(casa\shernandez)\s.*",
"^(cartonero\scentral)\s.*", "^(carniceria)\s.*", "^(carne\smart)\s.*", "^(calimax)\s.*",
"^(cajero)\s.*", "^(cafeteria)\s.*", "^(cafe)\s.*", "^(burritos)\s.*",
"^(burguer\sking|burger\sking)\s.*", "^(bip)\s.*", "^(bimbo\sexpendio)\s.*",
"^(burguer|burger)\s.*", "^(ba.os)\s.*", "^(bae)\s.*", "^(bachilleres)\s.*", "^(bachillerato)\s.*",
"^(autosercivio|auto\sservicio)\s.*", "^(autolavado|auto\slavado)\s.*",
"^(autobuses\sla\spiedad|autobuses\sde\sla\piedad)\s.*", "^(arrachera)\s.*",
"^(alsuper\sstore)\s.*", "^(alsuper)\s.*", "^(academia)\s.*", "^(abts)\s.*",
"^(abarrotera\slagunitas)\s.*", "^(abarrotera)\s.*", "^(abarrotes\sy\svinos)\s.*",
"^(abarrotes\sy\sverduras)\s.*", "^(abarrotes\sy\ssemillas)\s.*",
"^(abarrotes\sy\spapeleria)\s.*", "^(abarrotes\sy\snovedades)\s.*", "^(abarrotes\sy\sfruteria)\s.*",
"^(abarrotes\sy\sdeposito)\s.*", "^(abarrotes\sy\scremeria)\s.*", "^(abarrotes\sy\scarniceria)\s.*",
"^(abarrotes\svinos\sy\slicores)\s.*", "^(abarrote|abarrotes|abarotes|abarr|aba|ab)\s.*",
"^(7\seleven)\s.*", "^(7\s24)\s.*"]
client_df["NombreCliente2"] = client_df["NombreCliente"]
for var in special_list:
client_df[var] = client_df["NombreCliente"].str.extract(var, expand=False).str.upper()
replace = client_df.loc[~client_df[var].isnull(), var]
client_df.loc[~client_df[var].isnull(),"NombreCliente2"] = replace
client_df.drop(var, axis=1, inplace=True)
client_df.drop("NombreCliente", axis=1, inplace=True)
client_df.to_csv("../data/cliente_tabla2.csv.gz", compression="gzip", index=False)
def client_anaylsis2():
"""
    The idea here is to unify the client IDs of several different customers into broader categories, using a
    different approach from the one above.
"""
client_df =
|
pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
|
pandas.read_csv
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for pre-processing the data into individual, standardized formats."""
import collections
import datetime
import itertools
import os
import pathlib
import re
from typing import Callable, Dict, Set, Tuple
from absl import logging
from dm_c19_modelling.england_data import constants
import pandas as pd
import yaml
_PATH_FILENAME_REGEXES = "filename_regexes.yaml"
_COLUMNS = constants.Columns
_DATE_FORMAT = "%Y-%m-%d"
def _order_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Orders the columns of the dataframe as: date, region, observations."""
df.insert(0, _COLUMNS.DATE.value, df.pop(_COLUMNS.DATE.value))
reg_columns = []
obs_columns = []
for col in df.columns[1:]:
if col.startswith(constants.REGION_PREFIX):
reg_columns.append(col)
elif col.startswith(constants.OBSERVATION_PREFIX):
obs_columns.append(col)
else:
raise ValueError(f"Unknown column: '{col}'")
columns = [_COLUMNS.DATE.value] + reg_columns + obs_columns
return df[columns]
def _raw_data_formatter_daily_deaths(filepath: str) -> pd.DataFrame:
"""Loads and formats daily deaths data."""
sheet_name = "Tab4 Deaths by trust"
header = 15
df = pd.read_excel(filepath, sheet_name=sheet_name, header=header)
# Drop rows and columns which are all nans.
df.dropna(axis=0, how="all", inplace=True)
df.dropna(axis=1, how="all", inplace=True)
# Drop unneeded columns and rows.
drop_columns = ["Total", "Awaiting verification"]
up_to_mar_1_index = "Up to 01-Mar-20"
if sum(i for i in df[up_to_mar_1_index] if isinstance(i, int)) == 0.0:
drop_columns.append(up_to_mar_1_index)
df.drop(columns=drop_columns, inplace=True)
df = df[df["Code"] != "-"]
# Melt the death counts by date into "Date" and "Death Count" columns.
df = df.melt(
id_vars=["NHS England Region", "Code", "Name"],
var_name="Date",
value_name="Death Count")
# Rename the columns to their standard names.
df.rename(
columns={
"Date": _COLUMNS.DATE.value,
"Death Count": _COLUMNS.OBS_DEATHS.value,
"Code": _COLUMNS.REG_TRUST_CODE.value,
"Name": _COLUMNS.REG_TRUST_NAME.value,
"NHS England Region": _COLUMNS.REG_NHSER_NAME.value,
},
inplace=True)
_order_columns(df)
df[_COLUMNS.DATE.value] = df[_COLUMNS.DATE.value].map(
lambda x: x.strftime(_DATE_FORMAT))
# Sort and clean up the indices before returning the final dataframe.
df.sort_values([
_COLUMNS.DATE.value,
_COLUMNS.REG_TRUST_NAME.value,
_COLUMNS.REG_TRUST_CODE.value,
],
inplace=True)
df.reset_index(drop=True, inplace=True)
if df.isna().any().any():
raise ValueError("Formatted data 'daily_deaths' contains nans")
return df
def _raw_data_formatter_daily_cases(filepath: str) -> pd.DataFrame:
"""Loads and formats daily cases data."""
df =
|
pd.read_csv(filepath)
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
##### Author : <NAME>
##### github link : https://github.com/amirshnll/Abalone
##### dataset link : http://archive.ics.uci.edu/ml/datasets/Abalone
##### email : <EMAIL>
# In[5]:
import sklearn
import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
# In[6]:
#read file
df =
|
pd.read_csv("D:\\abalone.txt", header=None)
|
pandas.read_csv
|
import pyodbc
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import math
class DataManager:
def __init__(self):
self.now = datetime.now()
        self.conn = pyodbc.connect('DRIVER={SQL Server};'
                                   'SERVER=ZIRSYSPRO;'
                                   'DATABASE=MAINTDATA;'
                                   'Trusted_Connection=yes')
self.prod_lists = self.sql_data_lists('prs457_good_count_jnl',
*self.current_shift())
self.defect_lists = self.sql_data_lists('prs457_defect_code_jnl',
*self.current_shift())
# Production constants.
self.nameplate = 23 # seconds per cycle, ideal/nominal.
self.break_time = 1 # hour per shift
self.hours_per_shift = 8
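        # Rough capacity implied by these constants (a sanity-check assumption, not used directly below):
        # (hours_per_shift - break_time) * 3600 / nameplate = (8 - 1) * 3600 / 23 ≈ 1096 ideal cycles per shift.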
def current_shift(self):
self.now = datetime.now()
hour = self.now.hour
assert 0 <= hour <= 23, "hour out of range for current_shift method."
if 7 <= hour <= 14:
start_time = self.now.replace(hour=7, minute=0, second=0)
end_time = self.now.replace(hour=14, minute=59, second=59)
elif 15 <= hour <= 22:
start_time = self.now.replace(hour=15, minute=0, second=0)
end_time = self.now.replace(hour=22, minute=59, second=59)
elif hour == 23:
tomorrow = self.now + timedelta(days=1)
start_time = self.now.replace(hour=23, minute=0, second=0)
end_time = tomorrow.replace(hour=6, minute=59, second=59)
elif 0 <= hour <= 6:
            yesterday = self.now - timedelta(days=1)
start_time = yesterday.replace(hour=23, minute=0, second=0)
end_time = self.now.replace(hour=6, minute=59, second=59)
return start_time, end_time
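        # Example: if self.now were 2020-05-01 16:30, the 15 <= hour <= 22 branch applies and this
        # method would return (2020-05-01 15:00:00, 2020-05-01 22:59:59).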
def sql_data_lists(self, table, start_time, end_time):
cursor = self.conn.cursor()
cursor.execute("SELECT * FROM {0} "
"WHERE (submit_datetime > '{1}' "
"AND submit_datetime < '{2}')"
.format(table,
start_time.strftime('%Y-%m-%d %H:%M:%S'),
end_time.strftime('%Y-%m-%d %H:%M:%S')))
data = cursor.fetchall()
array = np.array(data)
column_lists = array.transpose().tolist()
try:
column_lists.pop(0)
except IndexError:
pass
return column_lists
def data_reset(self):
self.prod_lists = self.sql_data_lists('prs457_good_count_jnl',
*self.current_shift())
self.defect_lists = self.sql_data_lists('prs457_defect_code_jnl',
*self.current_shift())
@staticmethod
def set_list_of_lists(length, list_of_lists):
empty_lol = []
for idx in range(length):
empty_lol.append([])
if (len(list_of_lists) != len(empty_lol)
or type(list_of_lists) != type(empty_lol)):
return empty_lol
else:
for item in list_of_lists:
assert type(item) == list, \
"list of lists contains non-list item."
return list_of_lists
@property
def prod_rate(self):
return self._prod_rate
@prod_rate.setter
def prod_rate(self, rate):
if rate <= 23.5:
self._prod_rate = 23.5
else:
self._prod_rate = rate
return self._prod_rate
@property
def prod_lists(self):
return self._prod_lists
@prod_lists.setter
def prod_lists(self, list_of_lists):
self._prod_lists = self.set_list_of_lists(8, list_of_lists)
@property
def defect_lists(self):
return self._defect_lists
@defect_lists.setter
def defect_lists(self, list_of_lists):
self._defect_lists = self.set_list_of_lists(7, list_of_lists)
def prod_append(self, prod_list):
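        # Write one good-count record (six station flags, their total, and a
        # timestamp) to SQL, then mirror the same values into the in-memory
        # prod_lists columns.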
assert len(prod_list) == 6, "prod_list is wrong size for this method."
for value in prod_list:
assert 0 <= value <= 1, "list values are not 0 or 1, as expected"
good_cursor = self.conn.cursor()
good_submit = list(prod_list)
good_submit.extend([sum(prod_list), self.now])
good_cursor.execute("INSERT INTO prs457_good_count_jnl(station_one, "
"station_two, station_three, station_four, "
"station_five, station_six, total_good, "
"submit_datetime) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
*good_submit)
good_cursor.commit()
good_cursor.close()
index = 0
for data_list in self._prod_lists:
data_list.append(good_submit[index])
index += 1
def defect_append(self, defect_list):
        assert len(defect_list) == 6, \
            "defect_list is wrong size for this method."
for value in defect_list:
assert 0 <= value <= 16, \
"defect value is outside of expected range."
defect_cursor = self.conn.cursor()
defect_submit = list(defect_list)
defect_submit.append(self.now)
defect_cursor.execute("INSERT INTO prs457_defect_code_jnl(station_one, "
"station_two, station_three, station_four, "
"station_five, station_six, submit_datetime) "
"VALUES (?, ?, ?, ?, ?, ?, ?)",
*defect_submit)
defect_cursor.commit()
defect_cursor.close()
index = 0
for data_list in self._defect_lists:
data_list.append(defect_submit[index])
index += 1
def top_three_defect(self, station):
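        # Tally the defect codes logged for this station during the current
        # shift; the method name suggests the three most frequent codes are
        # what gets reported.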
assert 1 <= station <= 6, "Station does not exist."
defect_data = self.defect_lists[station-1]
defect_count =
|
pd.Series(defect_data)
|
pandas.Series
|
# %% [markdown]
# ##
import os
import time
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import adjusted_rand_score
from tqdm import tqdm
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import binarize, pass_to_ranks
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savefig
from src.visualization import (
CLASS_COLOR_DICT,
barplot_text,
gridmap,
matrixplot,
stacked_barplot,
)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def get_paired_inds(meta):
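    # Return positional indices of left/right neurons that form complete
    # pairs, ordered so that lp_inds[i] and rp_inds[i] are partners.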
# pair_meta = meta[meta["Pair"] != -1].copy()
pair_meta = meta[meta["Pair"].isin(meta.index)]
pair_group_size = pair_meta.groupby("Pair ID").size()
remove_pairs = pair_group_size[pair_group_size == 1].index
pair_meta = pair_meta[~pair_meta["Pair ID"].isin(remove_pairs)]
assert pair_meta.groupby("Pair ID").size().min() == 2
pair_meta.sort_values(["Pair ID", "hemisphere"], inplace=True)
lp_inds = pair_meta[pair_meta["hemisphere"] == "L"]["inds"]
rp_inds = pair_meta[pair_meta["hemisphere"] == "R"]["inds"]
assert (
meta.iloc[lp_inds]["Pair ID"].values == meta.iloc[rp_inds]["Pair ID"].values
).all()
return lp_inds, rp_inds
def compute_pairedness(partition, left_pair_inds, right_pair_inds, plot=False):
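    # Pairedness: build a contingency table of (left cluster, right cluster)
    # over known pairs, match clusters with the Hungarian algorithm
    # (linear_sum_assignment), and report the fraction of pairs landing in
    # matched clusters.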
uni_labels, inv = np.unique(partition, return_inverse=True)
train_int_mat = np.zeros((len(uni_labels), len(uni_labels)))
for i, ul in enumerate(uni_labels):
c1_mask = inv == i
for j, ul in enumerate(uni_labels):
c2_mask = inv == j
# number of times a thing in cluster 1 has a pair also in cluster 2
pairs_in_other = np.logical_and(
c1_mask[left_pair_inds], c2_mask[right_pair_inds]
).sum()
train_int_mat[i, j] = pairs_in_other
row_ind, col_ind = linear_sum_assignment(train_int_mat, maximize=True)
train_pairedness = np.trace(train_int_mat[np.ix_(row_ind, col_ind)]) / np.sum(
train_int_mat
) # TODO double check that this is right
if plot:
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
sns.heatmap(
train_int_mat, square=True, ax=axs[0], cbar=False, cmap="RdBu_r", center=0
)
int_df = pd.DataFrame(data=train_int_mat, index=uni_labels, columns=uni_labels)
int_df = int_df.reindex(index=uni_labels[row_ind])
int_df = int_df.reindex(columns=uni_labels[col_ind])
sns.heatmap(int_df, square=True, ax=axs[1], cbar=False, cmap="RdBu_r", center=0)
return train_pairedness, row_ind, col_ind
def compute_pairedness_bipartite(left_labels, right_labels):
left_uni_labels, left_inv = np.unique(left_labels, return_inverse=True)
right_uni_labels, right_inv = np.unique(right_labels, return_inverse=True)
train_int_mat = np.zeros((len(left_uni_labels), len(right_uni_labels)))
for i, ul in enumerate(left_uni_labels):
c1_mask = left_inv == i
for j, ul in enumerate(right_uni_labels):
c2_mask = right_inv == j
# number of times a thing in cluster 1 has a pair also in cluster 2
pairs_in_other = np.logical_and(c1_mask, c2_mask).sum()
train_int_mat[i, j] = pairs_in_other
row_ind, col_ind = linear_sum_assignment(train_int_mat, maximize=True)
train_pairedness = np.trace(train_int_mat[np.ix_(row_ind, col_ind)]) / np.sum(
train_int_mat
) # TODO double check that this is right
return train_pairedness, row_ind, col_ind
def crossval_cluster(
embed,
left_inds,
right_inds,
R,
min_clusters=2,
max_clusters=15,
n_init=25,
left_pair_inds=None,
right_pair_inds=None,
):
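    # For each k, fit a GMM per hemisphere and score it on its own side
    # (ipsi) and on the opposite side after applying the alignment matrix R
    # (contra, presumably a Procrustes rotation); optionally also score how
    # well the two clusterings agree across known left/right pairs.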
left_embed = embed[left_inds]
right_embed = embed[right_inds]
print("Running left/right clustering with cross-validation\n")
currtime = time.time()
rows = []
for k in tqdm(range(min_clusters, max_clusters)):
# train left, test right
# TODO add option for AutoGMM as well, might as well check
left_gc = GaussianCluster(min_components=k, max_components=k, n_init=n_init)
left_gc.fit(left_embed)
model = left_gc.model_
train_left_bic = model.bic(left_embed)
train_left_lik = model.score(left_embed)
test_left_bic = model.bic(right_embed @ R.T)
test_left_lik = model.score(right_embed @ R.T)
# train right, test left
right_gc = GaussianCluster(min_components=k, max_components=k, n_init=n_init)
right_gc.fit(right_embed)
model = right_gc.model_
train_right_bic = model.bic(right_embed)
train_right_lik = model.score(right_embed)
test_right_bic = model.bic(left_embed @ R)
test_right_lik = model.score(left_embed @ R)
left_row = {
"k": k,
"contra_bic": -test_left_bic,
"contra_lik": test_left_lik,
"ipsi_bic": -train_left_bic,
"ipsi_lik": train_left_lik,
"cluster": left_gc,
"train": "left",
"n_components": n_components,
}
right_row = {
"k": k,
"contra_bic": -test_right_bic,
"contra_lik": test_right_lik,
"ipsi_bic": -train_right_bic,
"ipsi_lik": train_right_lik,
"cluster": right_gc,
"train": "right",
"n_components": n_components,
}
# pairedness computation, if available
if left_pair_inds is not None and right_pair_inds is not None:
# TODO double check this is right
pred_left = left_gc.predict(embed[left_pair_inds])
pred_right = right_gc.predict(embed[right_pair_inds])
pness, _, _ = compute_pairedness_bipartite(pred_left, pred_right)
left_row["pairedness"] = pness
right_row["pairedness"] = pness
ari = adjusted_rand_score(pred_left, pred_right)
left_row["ARI"] = ari
right_row["ARI"] = ari
rows.append(left_row)
rows.append(right_row)
results = pd.DataFrame(rows)
print(f"{time.time() - currtime} elapsed")
return results
def plot_crossval_cluster(results):
fig, axs = plt.subplots(3, 1, figsize=(10, 10), sharex=True)
ax = axs[0]
sns.lineplot(data=results, x="k", y="contra_lik", hue="train", ax=ax, legend=False)
ax.lines[0].set_linestyle("--")
ax.lines[1].set_linestyle("--")
sns.lineplot(data=results, x="k", y="ipsi_lik", hue="train", ax=ax, legend=False)
ax.set_ylabel("Log likelihood")
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))
ax = axs[1]
sns.lineplot(data=results, x="k", y="contra_bic", hue="train", ax=ax, legend="full")
ax.lines[0].set_linestyle("--")
ax.lines[1].set_linestyle("--")
sns.lineplot(data=results, x="k", y="ipsi_bic", hue="train", ax=ax, legend="full")
ax.set_ylabel("-BIC")
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))
leg = ax.legend()
leg.set_title("Train side")
leg.texts[0].set_text("Test contra")
leg.set_bbox_to_anchor((1, 1.8))
lines = leg.get_lines()
lines[0].set_linestyle("--")
lines[1].set_linestyle("--")
lines[2].set_linestyle("--")
leg.texts[3].set_text("Test ipsi")
ax = axs[2]
sns.lineplot(
data=results,
x="k",
y="pairedness",
ax=ax,
legend="full",
color="purple",
label="Pairedness",
)
sns.lineplot(
data=results, x="k", y="ARI", ax=ax, legend="full", color="green", label="ARI"
)
ax.set_ylabel("Pair score")
leg = ax.legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
# leg.loc = 2
# leg.set_bbox_to_anchor((1, 1))
# ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))
# trans = transforms.blended_transform_factory(ax.transAxes, ax.transAxes)
# ax.text(0.8, 0.8, "Pairedness", color="purple", transform=trans)
# ax.text(0.8, 0.6, "ARI", color="green", transform=trans)
return fig, axs
def make_ellipses(gmm, ax, i, j, colors, alpha=0.5, equal=False, **kws):
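    # Draw each GMM component as a covariance ellipse projected onto
    # embedding dimensions (j, i) of the given axes.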
inds = [j, i]
for n, color in enumerate(colors):
if gmm.covariance_type == "full":
covariances = gmm.covariances_[n][np.ix_(inds, inds)]
elif gmm.covariance_type == "tied":
covariances = gmm.covariances_[np.ix_(inds, inds)]
elif gmm.covariance_type == "diag":
covariances = np.diag(gmm.covariances_[n][inds])
elif gmm.covariance_type == "spherical":
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2.0 * np.sqrt(2.0) * np.sqrt(v)
ell = mpl.patches.Ellipse(
gmm.means_[n, inds], v[0], v[1], 180 + angle, color=color, **kws
)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
if equal:
ax.set_aspect("equal", "datalim")
def plot_cluster_pairs(
X,
left_inds,
right_inds,
left_model,
right_model,
labels,
left_colors=None,
right_colors=None,
equal=True,
):
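    # Appears to build a scatter-matrix of the embedding dimensions, colored
    # by label, with the left- and right-hemisphere GMM components overlaid
    # as ellipses via make_ellipses.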
k = left_model.n_components
n_dims = X.shape[1]
if left_colors is None and right_colors is None:
tab20 = sns.color_palette("tab20", n_colors=2 * k, desat=0.7)
left_colors = tab20[::2]
right_colors = tab20[1::2]
colors = left_colors + right_colors
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data =
|
pd.DataFrame(data=X)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
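    # Computes a family of numbered price/volume alpha factors (alpha1,
    # alpha2, ...) over the [begin, end] window from OHLC, volume, amount,
    # vwap and index data pulled from SQL.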
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
        data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.8
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
        data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
                    /(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
uncompleted
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
    def alpha30(self):
        """
        uncompleted
        """
        close = self.close
        close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
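        # Flag days where the stock closes above its open while the benchmark
        # index closes at or below its open, then take the ratio of 50-day
        # rolling counts of the two events.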
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
        close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
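# The judge flags below select one of three range terms (temp2/temp3/temp4) depending on which absolute gap (abs1/abs2/abs3) dominates; the chosen term is then scaled against the close/open momentum term temp1.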
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = pd.DataFrame(np.min(r, axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
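# Cumulative product through time: up days contribute their percentage return, down or flat days contribute 1, and the first observation is seeded with 1.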
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
alpha = df.stack()
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank(pd.DataFrame(data2['open'] - data2['open_min']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = -1
r['alpha'][r['r1'] > r['r2']] = 0
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha148']
return alpha
@timer
def alpha149(self):
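# Regression beta of the stock's daily returns on the benchmark's daily returns over a 252-day window; benchmark returns are masked to NaN on days the index did not fall.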
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
close_index_delay = Delay(close_index,1)
data_index = pd.concat([close_index,close_index_delay], axis = 1, join = 'inner')
data_index.columns = ['close','close_delay']
data_index['delta'] = data_index['close']/data_index['close_delay'] - 1
data_index['judge'] = 1
data_index['judge'][data_index['close'] >= data_index['close_delay']] = 0
data_index['delta'][data_index['judge'] == 0] = np.nan
# index_delta_unstack = index_delta_unstack.dropna()
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['delta'] = data['close'] / data['close_delay'] - 1
df1 = pd.DataFrame(data['delta'])
df2 = pd.DataFrame(data_index['delta'])
alpha = RegBeta(1,df1,df2,252)
alpha.columns = ['alpha149']
return alpha
@timer
def alpha150(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
alpha = (data['Close'] + data['High'] + data['Low'])/3 * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha150']
return alpha
@timer
def alpha151(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close'] - data['close_delay'])
alpha = SMA(temp,20,1)
alpha.columns = ['alpha151']
return alpha
@timer
def alpha152(self):
close = self.close
close_delay = Delay(close,9)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
sma1 = SMA(temp_delay,9,1)
sma1_delay = Delay(sma1,1)
sma1_delay_mean1 = Mean(sma1_delay,12)
sma1_delay_mean2 = Mean(sma1_delay,26)
data_temp = pd.concat([sma1_delay_mean1,sma1_delay_mean2],axis = 1, join = 'inner')
data_temp.columns = ['m1','m2']
alpha = SMA(pd.DataFrame(data_temp['m1'] - data_temp['m2']),9,1)
alpha.columns = ['alpha152']
return alpha
@timer
def alpha153(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close_mean3, close_mean6, close_mean12, close_mean24], axis = 1 ,join ='inner')
alpha = pd.DataFrame(np.mean(data, axis = 1))
alpha.columns = ['alpha153']
return alpha
@timer
def alpha154(self):
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,180)
data = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr = Corr(data,18)
vwap_min = TsMin(vwap,16)
data1 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data1.columns = ['vwap','vwap_min']
temp = pd.DataFrame(data1['vwap'] - data1['vwap_min'])
data_temp = pd.concat([corr,temp], axis = 1, join = 'inner')
data_temp.columns = ['corr','temp']
data_temp['alpha'] = 1
data_temp['alpha'][data_temp['corr'] >= data_temp['temp']] = 0
alpha = pd.DataFrame(data_temp['alpha'])
alpha.columns = ['alpha154']
return alpha
@timer
def alpha155(self):
volume = self.volume
sma1 = SMA(volume,13,2)
sma2 = SMA(volume,26,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3], axis = 1 ,join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(data['temp'] - data['sma'])
alpha.columns = ['alpha155']
return alpha
@timer
def alpha156(self):
vwap = self.vwap
Open = self.open
low = self.low
vwap_delta = Delta(vwap,5)
vwap_delta_decay = DecayLinear(vwap_delta,3)
r1 = Rank(vwap_delta_decay)
data1 = pd.concat([Open,low],axis = 1, join = 'inner')
temp = -1 * Delta(pd.DataFrame(data1['Open'] * 0.15 + data1['Low'] * 0.85),2)
temp_decay = DecayLinear(temp,3)
r2 = Rank(temp_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(- 1 *np.max(r, axis = 1))
alpha.columns = ['alpha156']
return alpha
@timer
def alpha157(self):
close = self.close
ret = self.ret
close_delta = Delta(close,5)
close_delta_r = Rank(Rank(close_delta) * -1)
r1 = TsMin(close_delta_r,2)
ret_delay = Delay(-1 * ret,6)
r2 = TsRank(ret_delay,5)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
temp = pd.DataFrame(r['r1'] + r['r2'])
alpha = TsMin(temp,5)
alpha.columns = ['alpha157']
return alpha
@timer
def alpha158(self):
high = self.high
low = self.low
close = self.close
temp = SMA(close,15,2)
temp.columns = ['temp']
data = pd.concat([high,low,close,temp],axis = 1 , join = 'inner')
alpha =(data['High'] + data['Low'] - 2 * data['temp'] )/data['Close']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha158']
return alpha
@timer
def alpha159(self):
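# Blends 6-, 12- and 24-day ratios of summed min(low, prev close) to the summed span max(high, prev close) - min(low, prev close), weighted and normalised to a percentage.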
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
data1 = pd.concat([low,close_delay],axis = 1, join = 'inner')
data2 = pd.concat([high, close_delay], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.min(data1,axis = 1))
temp2= pd.DataFrame(np.max(data2,axis = 1))
temp = pd.concat([temp1,temp2], axis = 1 ,join = 'inner')
temp.columns = ['temp1','temp2']
temp1_sum6 = Sum(temp1,6)
temp1_sum12 = Sum(temp1,12)
temp1_sum24 = Sum(temp1,24)
tep = pd.DataFrame(temp['temp2'] - temp['temp1'])
s6 = Sum(tep,6)
s12 = Sum(tep,12)
s24 = Sum(tep,24)
data3 = pd.concat([temp1_sum6,temp1_sum12,temp1_sum24,s6,s12,s24], axis = 1 ,join = 'inner')
data3.columns = ['ts6','ts12','ts24','s6','s12','s24']
temp3 = pd.DataFrame(data3['ts6']/data3['s6'] * 12 * 24 + data3['ts12']/data3['s12'] * 6 * 24 \
+ data3['ts24']/data3['s24'] * 6 * 24)
alpha = temp3 / (6*12 + 6*24 + 12*24) * 100
alpha.columns = ['alpha159']
return alpha
@timer
def alpha160(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_std','close_delay']
data['close_std'][data['close'] >= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),20,1)
alpha.columns = ['alpha160']
return alpha
@timer
def alpha161(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data1 = pd.concat([high,low],axis = 1 , join = 'inner')
diff = pd.DataFrame(data1['High'] - data1['Low'])
data2 = pd.concat([close_delay,high], axis = 1, join ='inner')
abs1 = pd.DataFrame(np.abs(data2['close_delay'] - data2['High']))
data3 = pd.concat([diff,abs1], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data3,axis = 1))
data4 = pd.concat([close_delay,low],axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.abs(data4['close_delay'] -data4['Low']))
data = pd.concat([temp1,temp2],axis =1 , join = 'inner')
data.columns = ['temp1','temp2']
temp = pd.DataFrame(np.max(data, axis = 1))
alpha = Mean(temp,12)
alpha.columns = ['alpha161']
return alpha
@timer
def alpha162(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['max']= data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
temp1 = SMA(pd.DataFrame(data['max']),12,1)
temp2 = SMA(pd.DataFrame(data['abs']),12,1)
data1 = pd.concat([temp1,temp2], axis = 1, join = 'inner')
data1.columns = ['temp1','temp2']
tep = pd.DataFrame(data1['temp1']/data1['temp2'])
temp3 = TsMin(tep,12)
temp4 = TsMax(tep,12)
data_temp = pd.concat([tep,temp3,temp4], axis = 1, join = 'inner')
data_temp.columns = ['tep','temp3','temp4']
alpha = (data_temp['tep'] - data_temp['temp3'])/(data_temp['temp4'] - data_temp['temp3']) * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha162']
return alpha
@timer
def alpha163(self):
low = self.low
high = self.high
volume = self.volume
ret = self.ret
vwap = self.vwap
volume_mean = Mean(volume,20)
data = pd.concat([high,low,vwap,ret,volume_mean],axis = 1, join = 'inner')
data.columns = ['high','low','vwap','ret','volume_mean']
temp = pd.DataFrame(-1 *data['ret'] * data['volume_mean'] *data['vwap'] * \
(data['high'] - data['low']))
alpha = Rank(temp)
alpha.columns = ['alpha163']
return alpha
@timer
def alpha164(self):
close = self.close
high = self.high
low = self.low
close_delay = Delay(close,1)
data = pd.concat([close,high,low,close_delay],axis = 1, join = 'inner')
data.columns = ['close','high','low','close_delay']
data['temp'] = 1/(data['close'] - data['close_delay'])
data_min = TsMin(pd.DataFrame(data['temp']),12)
data_min.columns = ['min']
data2 = pd.concat([data,data_min],axis = 1, join = 'inner')
data2['tep'] = (data2['temp'] - data2['min'])/(data2['high'] - data2['low'])
data2['tep'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data2['tep']) * 100,13,2)
alpha.columns = ['alpha164']
return alpha
@timer
def alpha165(self):
close = self.close
close_mean = Mean(close,48)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame(data['close'] - data['close_mean'])
temp_sum = Sum(temp,48)
temp_sum_min = TsMin(temp_sum,48)
temp_sum_max = TsMax(temp_sum,48)
close_std = STD(close,48)
data_temp = pd.concat([temp_sum_min,temp_sum_max,close_std], axis = 1, join = 'inner')
data_temp.columns = ['min','max','std']
alpha = (data_temp['max'] - data_temp['min'])/data_temp['std']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha165']
return alpha
@timer
def alpha166(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_mean = Mean(temp,20)
data1 = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data1.columns = ['temp','temp_mean']
temp2 = Sum(pd.DataFrame(data1['temp'] - data1['temp_mean']),20) * 20 * 19
temp3 = Sum(temp,20) * 19 * 18
data2 = pd.concat([temp2,temp3], axis = 1, join = 'inner')
data2.columns = ['temp2','temp3']
alpha = np.power(data2['temp2'],1.5)/np.power(data2['temp3'],1.5)
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha166']
return alpha
@timer
def alpha167(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = data['close'] - data['close_delay']
data['temp'][data['close'] <= data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha167']
return alpha
@timer
def alpha168(self):
volume = self.volume
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean']
alpha = pd.DataFrame(data['volume']/data['volume_mean'] * -1)
alpha.columns = ['alpha168']
return alpha
@timer
def alpha169(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp1 = pd.DataFrame(data['close'] - data['close_delay'])
sma = SMA(temp1,9,1)
temp2 = Delay(sma,1)
temp2_mean12 = Mean(temp2,12)
temp2_mean26 = Mean(temp2,26)
data2 = pd.concat([temp2_mean12,temp2_mean26], axis = 1, join ='inner')
data2.columns = ['mean1','mean2']
alpha = SMA(pd.DataFrame(data2['mean1'] - data2['mean2']),10,1)
alpha.columns = ['alpha169']
return alpha
@timer
def alpha170(self):
close = self.close
high = self.high
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,20)
data1 = pd.concat([high,close,volume,volume_mean], axis = 1, join = 'inner')
data1.columns =['high','close','volume','volume_mean']
temp1 = pd.DataFrame(data1['high']/data1['close'] * data1['volume']/data1['volume_mean'])
r1 = Rank(temp1)
high_mean = Mean(high,5)
vwap_delay = Delay(vwap,5)
data2 = pd.concat([high,close,high_mean], axis = 1, join = 'inner')
data2.columns = ['high','close','high_mean']
temp2 = pd.DataFrame((data2['high'] - data2['close'])/data2['high_mean'])
temp2_r = Rank(temp2)
data3 = pd.concat([vwap,vwap_delay], axis = 1, join = 'inner')
data3.columns = ['vwap','vwap_delay']
temp3 = pd.DataFrame(data3['vwap'] - data3['vwap_delay'])
temp3_r = Rank(temp3)
rank = pd.concat([temp2_r,temp3_r], axis = 1, join = 'inner')
rank.columns = ['r1','r2']
r2 = pd.DataFrame(rank['r1'] - rank['r2'])
data_temp = pd.concat([r1,r2],axis = 1, join = 'inner')
data_temp.columns = ['r1','r2']
alpha = pd.DataFrame(data_temp['r1'] * data_temp['r2'])
alpha.columns = ['alpha170']
return alpha
@timer
def alpha171(self):
high = self.high
close = self.close
low = self.low
Open = self.open
data = pd.concat([high,close,low,Open],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * (data['Low'] - data['Close']) * np.power(data['Open'],5)/\
((data['Close'] - data['High']) * np.power(data['Close'],5)))
alpha.columns = ['alpha171']
return alpha
@timer
def alpha172(self):
high = self.high
low = self.low
hd = HD(high)
ld = LD(low)
data = pd.concat([hd,ld],axis = 1, join = 'inner')
data.columns = ['hd','ld']
data['temp'] = 0
data['temp'][((data['hd'] > data['ld'])& (data['hd'] > 0)) | \
((data['ld'] > data['hd'])& (data['ld'] > 0))] = 1
alpha = Mean(pd.DataFrame(data['temp']),6)
alpha.columns = ['alpha172']
return alpha
@timer
def alpha173(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close,13,2)
temp2 = SMA(close_ln,13,2)
temp3 = SMA(temp1,13,2)
temp4 = SMA(SMA(temp2,13,2),13,2)
data = pd.concat([temp1,temp3,temp4], axis = 1, join = 'inner')
data.columns = ['t1','t2','t3']
alpha = pd.DataFrame(3 * data['t1'] - 2 * data['t2'] + data['t3'])
alpha.columns = ['alpha173']
return alpha
@timer
def alpha174(self):
close = self.close
close_delay = Delay(close,1)
close_std = STD(close,20)
data = pd.concat([close,close_delay,close_std], axis = 1, join = 'inner')
data.columns = ['close','close_delay','close_std']
data['close_std'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),12,1)
alpha.columns = ['alpha174']
return alpha
@timer
def alpha175(self):
high = self.high
close = self.close
low = self.low
close_delay = Delay(close,1)
data = pd.concat([high,close,low,close_delay],axis = 1, join = 'inner')
data.columns = ['high','close','low','close_delay']
data_abs = pd.DataFrame(np.abs(data['close_delay'] - data['high']))
h_l = pd.DataFrame(data['high'] - data['low'])
data1 = pd.concat([data_abs,h_l], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data1,axis = 1))
data_abs2 = pd.DataFrame(np.abs(data['close_delay'] - data['low']))
data2 = pd.concat([temp1,data_abs2], axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.max(data2,axis = 1))
data3 = pd.concat([temp1,temp2],axis = 1, join = 'inner')
max_temp = pd.DataFrame(np.max(data3,axis = 1))
alpha = Mean(max_temp,6)
alpha.columns = ['alpha175']
return alpha
@timer
def alpha176(self):
high = self.high
close = self.close
low = self.low
volume = self.volume
low_min = TsMin(low,12)
high_max = TsMax(high,12)
data1 = pd.concat([close,low_min,high_max],axis = 1, join = 'inner')
data1.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data1['close'] - data1['low_min'])\
/(data1['high_max'] - data1['low_min']))
r1 = Rank(temp)
r2 = Rank(volume)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = Corr(rank,6)
alpha.columns = ['alpha176']
return alpha
@timer
def alpha177(self):
high = self.high
highday = Highday(high,20)
alpha = pd.DataFrame((20 - highday)/20.0 * 100)
alpha.columns = ['alpha177']
return alpha
@timer
def alpha178(self):
close = self.close
close_delay = Delay(close,1)
volume = self.volume
data = pd.concat([close,close_delay,volume], axis = 1, join = 'inner')
data.columns = ['close','close_delay','volume']
alpha = pd.DataFrame((data['close'] - data['close_delay'])\
/data['close_delay'] * data['volume'])
alpha.columns = ['alpha178']
return alpha
@timer
def alpha179(self):
low = self.low
volume = self.volume
vwap = self.vwap
data1 = pd.concat([vwap,volume], axis = 1, join = 'inner')
corr = Corr(data1,4)
r1 = Rank(corr)
volume_mean = Mean(volume,50)
volume_mean_r = Rank(volume_mean)
row_r = Rank(low)
data2 = pd.concat([row_r,volume_mean_r], axis = 1, join = 'inner')
corr2 = Corr(data2,12)
r2 = Rank(corr2)
data = pd.concat([r1,r2], axis = 1, join = 'inner')
data.columns = ['r1','r2']
alpha = pd.DataFrame(data['r1'] * data['r2'])
alpha.columns = ['alpha179']
return alpha
@timer
def alpha180(self):
volume = self.volume
close = self.close
close_delta = Delta(close,7)
volume_mean = Mean(volume,20)
close_delta_abs = pd.DataFrame(np.abs(close_delta))
r = TsRank(close_delta_abs,60)
sign = pd.DataFrame(np.sign(close_delta))
temp = pd.concat([r,sign],axis = 1, join = 'inner')
temp.columns = ['r','sign']
temp1 = temp['r'] * temp['sign']
data = pd.concat([volume,volume_mean,temp1], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean','temp1']
data['volume1'] = data['volume']
data['temp1'][data['volume'] >= data['volume_mean']] = 0
data['volume1'][data['volume'] < data['volume_mean']] = 0
alpha = -1 * pd.DataFrame(data['volume1'] + data['temp1'])
alpha.columns = ['alpha180']
return alpha
@timer
def alpha181(self):
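# Compares each stock's 20-day sum of demeaned returns with 20-day sums of the benchmark's squared and cubed deviations; repmat broadcasts the single benchmark series across all stock columns.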
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
data1 = pd.concat([close,close_delay],axis = 1, join = 'inner')
data1.columns = ['close','close_delay']
temp = pd.DataFrame(data1['close']/data1['close_delay']) - 1
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data_temp.columns = ['temp','temp_mean']
temp1 = pd.DataFrame(data_temp['temp'] - data_temp['temp_mean'])
close_index_mean = Mean(close_index,20)
data2 = pd.concat([close_index,close_index_mean], axis = 1, join = 'inner')
data2.columns = ['close_index','close_index_mean']
temp2 = pd.DataFrame(np.power(data2['close_index'] - data2['close_index_mean'],2))
temp3 = pd.DataFrame(np.power(data2['close_index'] - data2['close_index_mean'],3))
temp1_unstack = temp1.unstack()
temp2_unstack = temp2.unstack()
temp2_mod = pd.DataFrame(repmat(temp2_unstack,1,np.size(temp1_unstack,1)))
temp3_unstack = temp3.unstack()
temp3_mod = pd.DataFrame(repmat(temp3_unstack,1,np.size(temp1_unstack,1)))
temp1_result = temp1_unstack.rolling(20, min_periods = 20).sum()
temp2_result = temp2_mod.rolling(20, min_periods = 20).sum()
temp2_result.index = temp2_unstack.index.tolist()
temp3_result = temp3_mod.rolling(20, min_periods = 20).sum()
temp3_result.index = temp3_unstack.index.tolist()
result = pd.concat([temp1_result,temp2_result,temp3_result], axis = 1, join = 'inner')
m = np.size(temp1_result,1)
alpha_temp = pd.DataFrame((result.values[:,:m] - result.values[:,m:2*m])/result.values[:,2*m:])
df1 = result.iloc[:,:m]
alpha_temp.columns = df1.columns.tolist()
alpha_temp.index = df1.index.tolist()
alpha = pd.DataFrame(alpha_temp.stack())
alpha.columns = ['alpha181']
return alpha
@timer
def alpha182(self):
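# Fraction of the past 20 days on which the stock and the benchmark index closed on the same side of their opens (both up, or both down).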
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1['temp'] = 1
data1['temp'][data1['Close'] <= data1['Open']] = 0
data1['temp1'] = 1
data1['temp1'][data1['Close'] > data1['Open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2['tep'] = 0
data2['tep'][data2['close'] > data2['open']] = 1
data2['tep1'] = 0
data2['tep1'][data2['close'] < data2['open']] = 1
temp = data1['temp'].unstack()
temp1 = data1['temp1'].unstack()
tep = data2['tep'].unstack()
tep1 = data2['tep1'].unstack()
tep_rep = repmat(tep,1,np.size(temp,1))
tep1_rep = repmat(tep1,1,np.size(temp,1))
data3 = temp * tep_rep + temp1 * tep1_rep - temp * tep_rep * temp1 * tep1_rep
result = data3.rolling(20,min_periods = 20).sum()
alpha_temp = result/20.0
alpha = pd.DataFrame(alpha_temp.stack())
alpha.columns = ['alpha182']
return alpha
@timer
def alpha183(self):
close = self.close
close_mean = Mean(close,24)
close_std = STD(close,24)
data1 = pd.concat([close,close_mean], axis = 1, join = 'inner')
data1.columns = ['close','close_mean']
temp = pd.DataFrame(data1['close'] - data1['close_mean'])
temp_max = TsMax(temp,24)
temp_min = TsMin(temp,24)
data2 = pd.concat([temp_max,temp_min,close_std],axis = 1, join = 'inner')
data2.columns = ['max','min','std']
alpha = pd.DataFrame((data2['max'] - data2['min'])/data2['std'])
alpha.columns = ['alpha183']
return alpha
@timer
def alpha184(self):
close = self.close
Open = self.open
data = pd.concat([close,Open], axis = 1, join = 'inner')
data['diff'] = data['Open'] - data['Close']
diff_delay = Delay(pd.DataFrame(data['diff']),1)
data1 = pd.concat([diff_delay,close],axis = 1, join = 'inner')
corr = Corr(data1,200)
r1 = Rank(corr)
r2 = Rank(pd.DataFrame(data['diff']))
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha184']
return alpha
@timer
def alpha185(self):
close = self.close
Open = self.open
data = pd.concat([close,Open], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Open']/data['Close'])
tep = -1 * (1 - np.power(temp,2))
alpha = Rank(pd.DataFrame(tep))
alpha.columns = ['alpha185']
return alpha
@timer
def alpha186(self):
high = self.high
low = self.low
hd = HD(high)
ld = LD(low)
data = pd.concat([hd,ld],axis = 1, join = 'inner')
data.columns = ['hd','ld']
data['temp'] = 0
data['temp'][((data['hd'] > data['ld'])& (data['hd'] > 0)) | \
((data['ld'] > data['hd'])& (data['ld'] > 0))] = 1
temp = pd.DataFrame(data['temp'])
temp_mean = Mean(temp,6)
temp_mean_delay = Delay(temp_mean,6)
data = pd.concat([temp_mean,temp_mean_delay], axis = 1, join = 'inner')
data.columns = ['mean','delay']
alpha = pd.DataFrame((data['mean'] + data['delay'])/2)
alpha.columns = ['alpha186']
return alpha
@timer
def alpha187(self):
Open = self.open
high = self.high
open_delay = Delay(Open,1)
data = pd.concat([Open,high,open_delay], axis = 1, join = 'inner')
data.columns = ['open','high','open_delay']
diff = pd.DataFrame(data['high'] - data['open'])
open_delta = Delta(Open,1)
data1 = pd.concat([diff, open_delta], axis = 1, join = 'inner')
max_temp = pd.DataFrame(np.max(data1, axis = 1))
temp = Sum(max_temp,20)
data2 = pd.concat([Open,open_delay,temp],axis = 1, join = 'inner')
data2.columns = ['open','open_delay','temp']
data2['temp'][data['open'] > data['open_delay']] = 0
alpha = pd.DataFrame(data2['temp'])
alpha.columns = ['alpha187']
return alpha
@timer
def alpha188(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),11,2)
data1 = pd.concat([temp,low,high],axis = 1, join = 'inner')
data1.columns = ['temp','low','high']
alpha = (data1['high'] - data1['low'] - data1['temp'])/data1['temp'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha188']
return alpha
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
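# HDFStore should honour the requested file mode in the constructor, the context manager and to_hdf/read_hdf.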
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] =
|
tm.makeTimeSeries()
|
pandas._testing.makeTimeSeries
|
import pandas as pd
def timeseries_to_pandas(ts,ind,x):
if x>1:
ts=list(map(list,zip(*ts)))
df=pd.DataFrame(data=ts,index=ind)
else:
df=pd.DataFrame(data=ts,index=ind)
return df.astype('float64')
def spectra_to_pandas(frequency,spectra,x,cols=None):
if x>1:
ts=list(map(list,zip(*spectra)))
df=pd.DataFrame(data=ts,index=frequency)
else:
df=
|
pd.DataFrame(data=spectra,index=frequency)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import pandas as pd
import requests
url_lemma = "https://news.yahoo.co.jp/search/?p=%E8%A6%B3%E5%85%89+%E8%AA%B2%E9%A1%8C&ei=UTF-8&b="
urls = [url_lemma + str(i) for i in range(1,992,10)]
def extract_text(url,prev_text = ""):
first_page = 0
if prev_text == "":
first_page = 1
response = requests.get(url, proxies=proxies)
soup = BeautifulSoup(response.text, 'html.parser')
text = (soup.find('div',class_="articleMain").text).split("\n")
text = list(filter(lambda x: ("画像" not in x), text))
text = list(map(lambda x: x.strip(),text))
text = list(filter(None, text))
p_ind = 0
for index,paragraph in enumerate(text):
if paragraph[-3:] == "ページ":
p_ind = 1
page_numbering = paragraph.strip()[:-3]
current_page = page_numbering.split("/")[0]
final_page = page_numbering.split("/")[1]
"""
if "次ページは" in paragraph:
### Recursive call inside a nested multi-page article
p_ind = 1
text = text[:index]
next_url = (soup.find("div",class_="textCenter marT10")).find('a').get('href')
prev_text += extract_text(next_url," ".join(text))
"""
if p_ind == 0:
for index,paragraph in enumerate(text):
if "関連記事" in paragraph:
### In case of single page article
text = text[:index]
meta_data = (soup.find("div",class_="hd").text).split("\n")
meta_data = list(filter(None,meta_data))
title = meta_data[0].strip()
date = url.split("a=")[1][:8]
return " ".join(text),title,date
if paragraph[-3:] == "ページ":
### Last page of a multiple page article (recursion floor)
text = text[:index -2]
return " ".join(text)
if first_page == 1:
### Final return in the recursion (First page)
meta_data = (soup.find("div",class_="hd").text).split("\n")
meta_data = list(filter(None,meta_data))
title = meta_data[0].strip()
date = url.split("a=")[1][:8]
return [prev_text,title,date]
### String return in the middle of recursion
return prev_text
#%%
contents = list()
for url in urls:
response = requests.get(url, proxies=proxies)
soup = BeautifulSoup(response.text, 'html.parser')
frame = soup.find("div",class_="cl")
articles = frame.find_all("h2",class_="t")
for article in articles:
article_url = article.find('a').get('href')
try:
temp = (extract_text(article_url))
if type(temp) == list:
contents.append(temp)
except:
pass
if len(contents) > 10:
break
#%%
df =
|
pd.DataFrame(contents,columns=["タイトル","日付","テキスト"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
|
tm.assert_frame_equal(panelc[0], panel[0])
|
pandas.util.testing.assert_frame_equal
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 qizai <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
This script will take the bedgraph file as input, and process it to create the output
binning intensity.
"""
import os
from pyBedGraph import BedGraph
import numpy as np
import pandas as pd
import scipy
from scipy.stats import binom_test
import ipdb
import argparse
import matplotlib.pyplot as plt
import matplotlib as mpl
def get_max_intensity_in_same_len_bins(bedGraph, nbins, left_start, chrom_left, right_end,
chrom_right=None, chrom_size = np.infty, flank_per=5):
'''
if chrom_right != None, then check if chrom_left == chrom_right.
pyBedGraph can only query [chr, start, end] tuple.
----
left_start: left anchor starting site
right_end: right anchor ending site
nbins: number of bins in the loop
flank_per: percent of loop length to extend on both side.
'''
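# Editorial worked example (added for clarity, not from the original script): with
# left_start=10000, right_end=20000, nbins=100 and the default flank_per=5,
# loop_length is 10000, flank_length is 500, and the queried window [9500, 20500]
# is split by np.linspace into 101 edges, i.e. 100 equal-width bins of 110 bp each.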
if chrom_right != None:
if chrom_left != chrom_right:
raise ValueError('row has anchors in different chromosomes: {}, {}'.format(
    chrom_left, chrom_right))
loop_length = right_end - left_start
assert loop_length > 0
flank_length = int(loop_length * flank_per / 100)
start_idx = max(left_start - flank_length, 0)
# ipdb.set_trace()
end_idx = min(right_end + flank_length, chrom_size.values[0] - 1)
if start_idx < 0 or start_idx > chrom_size.values[0] - 1:
ipdb.set_trace()
nbins_edges = np.linspace(start_idx, end_idx, nbins + 1, dtype=np.int32)
start_list = nbins_edges[:-1]
end_list = nbins_edges[1:]
try:
bin_values = bedGraph.stats(start_list=start_list,
end_list=end_list,
chrom_name=chrom_left,
stat='max')
except:
print(chrom_left)
print(end_list)
print(start_idx)
ipdb.set_trace()
return bin_values
def get_aggregated_inten_for_each_class(df_binned_intensity_per_loop, nbins, catag):
'''
nbins \in {100, 500, 1000}
catag \in {'bias', 'convergence', 'NULL motif'}
'''
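# Editorial note (added for clarity, not from the original script): the frames built
# below are indexed by the sorted chromosomes (chrX last, followed by a trailing
# 'whole genome' row), with one column per label of `catag` other than 'na';
# sum, mean and what appears to be variance are aggregated into separate DataFrames.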
bin_name = '{} binned intensity'.format(nbins)
set_of_label = set(df_binned_intensity_per_loop[catag])
label_list = list([x for x in set_of_label if x != 'na'])
label_list.sort()
total_num_loops_in_catag = (
df_binned_intensity_per_loop[catag] != 'na').sum()
chrom_list = list(set(df_binned_intensity_per_loop['chrom']))
chrom_list.sort(key=lambda x: int(x[3:]) if x != 'chrX' else 24)
chrom_list.append('whole genome')
df_aggregate_sum = pd.DataFrame(columns=label_list, index=chrom_list)
df_aggregate_mean = pd.DataFrame(columns=label_list, index=chrom_list)
df_aggregate_var =
|
pd.DataFrame(columns=label_list, index=chrom_list)
|
pandas.DataFrame
|
import datetime
import io
import pathlib
import dataclasses
import pytest
import pandas as pd
import structlog
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import FieldName
from covidactnow.datapublic.common_fields import PdFields
from covidactnow.datapublic.common_test_helpers import to_dict
from libs import github_utils
from libs.datasets import AggregationLevel
from libs.datasets import combined_datasets
from libs.datasets import dataset_pointer
from libs.datasets import timeseries
from libs.datasets.taglib import TagType
from libs.datasets.taglib import UrlStr
from libs.pipeline import Region
from tests import test_helpers
from tests.dataset_utils_test import read_csv_and_index_fips
from tests.dataset_utils_test import read_csv_and_index_fips_date
from tests.test_helpers import TimeseriesLiteral
# turns all warnings into errors for this module
pytestmark = pytest.mark.filterwarnings("error", "ignore::libs.pipeline.BadFipsWarning")
def _make_dataset_pointer(tmpdir, filename: str = "somefile.csv") -> dataset_pointer.DatasetPointer:
# The fixture passes in a py.path, which is not the type in DatasetPointer.
path = pathlib.Path(tmpdir) / filename
fake_git_summary = github_utils.GitSummary(sha="abcdef", branch="main", is_dirty=True)
return dataset_pointer.DatasetPointer(
dataset_type=dataset_pointer.DatasetType.MULTI_REGION,
path=path,
data_git_info=fake_git_summary,
model_git_info=fake_git_summary,
updated_at=datetime.datetime.utcnow(),
)
@pytest.mark.parametrize("include_na_at_end", [False, True])
def test_remove_padded_nans(include_na_at_end):
rows = [
{"date": "2020-02-01", "cases": pd.NA},
{"date": "2020-02-02", "cases": pd.NA},
{"date": "2020-02-03", "cases": 1},
{"date": "2020-02-04", "cases": pd.NA},
{"date": "2020-02-05", "cases": 2},
{"date": "2020-02-06", "cases": 3},
]
if include_na_at_end:
rows += [{"date": "2020-02-07", "cases": pd.NA}]
df = pd.DataFrame(rows)
results = timeseries._remove_padded_nans(df, ["cases"])
expected_series = pd.Series([1, pd.NA, 2, 3], name="cases")
pd.testing.assert_series_equal(results.cases, expected_series)
def test_multi_region_to_from_timeseries_and_latest_values(tmp_path: pathlib.Path):
ts_df = read_csv_and_index_fips_date(
"fips,county,aggregate_level,date,m1,m2\n"
"97111,Bar County,county,2020-04-02,2,\n"
"97222,Foo County,county,2020-04-01,,10\n"
"01,,state,2020-04-01,,20\n"
).reset_index()
latest_values_df = read_csv_and_index_fips(
"fips,county,aggregate_level,c1,c2\n"
"97111,Bar County,county,3,\n"
"97222,Foo County,county,4,10.5\n"
"01,,state,,123.4\n"
).reset_index()
multiregion = (
timeseries.MultiRegionDataset.from_fips_timeseries_df(ts_df)
.add_fips_static_df(latest_values_df)
.add_provenance_csv(
io.StringIO("location_id,variable,provenance\n" "iso1:us#fips:97111,m1,ts197111prov\n")
)
)
region_97111 = multiregion.get_one_region(Region.from_fips("97111"))
assert region_97111.date_indexed.at["2020-04-02", "m1"] == 2
assert region_97111.latest["c1"] == 3
assert multiregion.get_one_region(Region.from_fips("01")).latest["c2"] == 123.4
csv_path = tmp_path / "multiregion.csv"
multiregion.to_csv(csv_path)
multiregion_loaded = timeseries.MultiRegionDataset.from_csv(csv_path)
region_97111 = multiregion_loaded.get_one_region(Region.from_fips("97111"))
assert region_97111.date_indexed.at["2020-04-02", "m1"] == 2
assert region_97111.latest["c1"] == 3
assert region_97111.region.fips == "97111"
assert multiregion_loaded.get_one_region(Region.from_fips("01")).latest["c2"] == 123.4
test_helpers.assert_dataset_like(
multiregion, multiregion_loaded, drop_na_latest=True, drop_na_timeseries=True
)
def test_multi_region_get_one_region():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,county,aggregate_level,date,m1,m2\n"
"iso1:us#fips:97111,Bar County,county,2020-04-02,2,\n"
"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\n"
"iso1:us#fips:97111,Bar County,county,,3,\n"
"iso1:us#fips:97222,Foo County,county,,,11\n"
)
)
region_97111_ts = ts.get_one_region(Region.from_fips("97111"))
assert to_dict(["date"], region_97111_ts.data[["date", "m1", "m2"]]) == {
pd.to_datetime("2020-04-02"): {"m1": 2}
}
assert region_97111_ts.latest["m1"] == 3
assert region_97111_ts.region.fips == "97111"
region_97222_ts = ts.get_one_region(Region.from_fips("97222"))
assert to_dict(["date"], region_97222_ts.data) == {
pd.to_datetime("2020-04-01"): {"m2": 10, "location_id": "iso1:us#fips:97222",}
}
assert region_97222_ts.latest["m2"] == 11
def test_multi_region_get_counties_and_places():
ds_in = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,county,aggregate_level,date,m1,m2\n"
"iso1:us#fips:97111,Bar County,county,2020-04-02,2,\n"
"iso1:us#fips:97111,Bar County,county,2020-04-03,3,\n"
"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\n"
"iso1:us#fips:9711122,,place,2020-04-02,5,60\n"
"iso1:us#fips:97,Great State,state,2020-04-01,1,2\n"
"iso1:us#fips:97111,Bar County,county,,3,\n"
"iso1:us#fips:9711122,,place,,3,\n"
"iso1:us#fips:97222,Foo County,county,,,10\n"
"iso1:us#fips:97,Great State,state,,1,2\n"
)
)
ds_out = ds_in.get_counties_and_places(
after=pd.to_datetime("2020-04-01")
).timeseries.reset_index()
assert to_dict(["location_id", "date"], ds_out[["location_id", "date", "m1"]]) == {
("iso1:us#fips:97111", pd.to_datetime("2020-04-02")): {"m1": 2},
("iso1:us#fips:97111", pd.to_datetime("2020-04-03")): {"m1": 3},
("iso1:us#fips:9711122", pd.to_datetime("2020-04-02")): {"m1": 5},
}
def test_multi_region_groupby():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,county,aggregate_level,date,m1,m2\n"
"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\n"
"iso1:us#fips:97222,Foo County,county,2020-04-02,,20\n"
"iso1:us#fips:97,Great State,state,2020-04-01,1,2\n"
"iso1:us#fips:97222,Foo County,county,,,20\n"
"iso1:us#fips:97,Great State,state,,1,2\n"
)
)
assert ts.groupby_region()["m2"].last().to_dict() == {
"iso1:us#fips:97": 2,
"iso1:us#fips:97222": 20,
}
def test_one_region_dataset():
bar_county_row = {
"location_id": "iso1:us#fips:97111",
"county": "Bar County",
"aggregate_level": "county",
"date": "2020-04-02",
"m1": 2,
"m2": pd.NA,
}
ts = timeseries.OneRegionTimeseriesDataset(
Region.from_fips("97111"), pd.DataFrame([bar_county_row]), {}
)
assert ts.has_one_region() == True
foo_county_row = {
"location_id": "iso1:us#fips:97222",
"county": "Foo County",
"aggregate_level": "county",
"date": "2020-04-01",
"m1": pd.NA,
"m2": 10,
}
with pytest.raises(ValueError):
timeseries.OneRegionTimeseriesDataset(
Region.from_fips("97222"), pd.DataFrame([bar_county_row, foo_county_row]), {},
)
with structlog.testing.capture_logs() as logs:
ts = timeseries.OneRegionTimeseriesDataset(
Region.from_fips("99999"),
pd.DataFrame([], columns="location_id county aggregate_level date m1 m2".split()),
{},
)
assert [l["event"] for l in logs] == ["Creating OneRegionTimeseriesDataset with zero regions"]
assert ts.empty
def test_multiregion_provenance():
input_df = read_csv_and_index_fips_date(
"fips,county,aggregate_level,date,m1,m2\n"
"97111,Bar County,county,2020-04-01,1,\n"
"97111,Bar County,county,2020-04-02,2,\n"
"97222,Foo County,county,2020-04-01,,10\n"
"97222,Foo County,county,2020-04-03,3,30\n"
"03,,state,2020-04-03,4,40\n"
).reset_index()
provenance = combined_datasets.provenance_wide_metrics_to_series(
read_csv_and_index_fips_date(
"fips,date,m1,m2\n"
"97111,2020-04-01,src11,\n"
"97111,2020-04-02,src11,\n"
"97222,2020-04-01,,src22\n"
"97222,2020-04-03,src21,src22\n"
"03,2020-04-03,src31,src32\n"
),
structlog.get_logger(),
)
out = timeseries.MultiRegionDataset.from_fips_timeseries_df(input_df).add_fips_provenance(
provenance
)
# Use loc[...].at[...] as work-around for https://github.com/pandas-dev/pandas/issues/26989
assert out.provenance.loc["iso1:us#fips:97111"].at["m1"] == "src11"
assert out.get_one_region(Region.from_fips("97111")).provenance["m1"] == ["src11"]
assert out.provenance.loc["iso1:us#fips:97222"].at["m2"] == "src22"
assert out.get_one_region(Region.from_fips("97222")).provenance["m2"] == ["src22"]
assert out.provenance.loc["iso1:us#fips:03"].at["m2"] == "src32"
assert out.get_one_region(Region.from_fips("03")).provenance["m2"] == ["src32"]
counties = out.get_counties_and_places(after=pd.to_datetime("2020-04-01"))
assert "iso1:us#fips:03" not in counties.provenance.index
assert counties.provenance.loc["iso1:us#fips:97222"].at["m1"] == "src21"
assert counties.get_one_region(Region.from_fips("97222")).provenance["m1"] == ["src21"]
def test_one_region_multiple_provenance():
tag1 = test_helpers.make_tag(date="2020-04-01")
tag2 = test_helpers.make_tag(date="2020-04-02")
dataset_in = test_helpers.build_default_region_dataset(
{
CommonFields.ICU_BEDS: TimeseriesLiteral(
[0, 2, 4], annotation=[tag1, tag2], provenance=["prov1", "prov2"],
),
CommonFields.CASES: [100, 200, 300],
}
)
one_region = dataset_in.get_one_region(test_helpers.DEFAULT_REGION)
assert set(one_region.annotations(CommonFields.ICU_BEDS)) == {tag1, tag2}
assert sorted(one_region.provenance[CommonFields.ICU_BEDS]) == ["prov1", "prov2"]
def test_append_regions():
ts_fips = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-03,Bar County,county,3,\n"
"iso1:us#fips:97222,2020-04-04,Foo County,county,,11\n"
"iso1:us#fips:97111,,Bar County,county,3,\n"
"iso1:us#fips:97222,,Foo County,county,,11\n"
)
).add_provenance_csv(
io.StringIO("location_id,variable,provenance\n" "iso1:us#fips:97111,m1,prov97111m1\n")
)
ts_cbsa = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,m2\n"
"iso1:us#cbsa:10100,2020-04-02,2\n"
"iso1:us#cbsa:10100,2020-04-03,3\n"
"iso1:us#cbsa:20200,2020-04-03,4\n"
"iso1:us#cbsa:10100,,3\n"
"iso1:us#cbsa:20200,,4\n"
)
).add_provenance_csv(
io.StringIO("location_id,variable,provenance\n" "iso1:us#cbsa:20200,m1,prov20200m2\n")
)
# Check that merge is symmetric
ts_merged_1 = ts_fips.append_regions(ts_cbsa)
ts_merged_2 = ts_cbsa.append_regions(ts_fips)
test_helpers.assert_dataset_like(ts_merged_1, ts_merged_2)
ts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:20200,2020-04-03,,,,4\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#cbsa:20200,,,,,4\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-03,Bar County,county,3,\n"
"iso1:us#fips:97222,2020-04-04,Foo County,county,,11\n"
"iso1:us#fips:97111,,Bar County,county,3,\n"
"iso1:us#fips:97222,,Foo County,county,,11\n"
)
).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#fips:97111,m1,prov97111m1\n"
"iso1:us#cbsa:20200,m1,prov20200m2\n"
)
)
test_helpers.assert_dataset_like(ts_merged_1, ts_expected)
def test_append_regions_duplicate_region_raises():
ts1 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
)
)
ts2 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#fips:97111,2020-04-03,Bar County,county,2,\n"
)
)
with pytest.raises(ValueError):
ts1.append_regions(ts2)
def test_calculate_new_cases():
mrts_before = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases\n"
"iso1:us#fips:1,2020-01-01,0\n"
"iso1:us#fips:1,2020-01-02,1\n"
"iso1:us#fips:1,2020-01-03,1\n"
"iso1:us#fips:2,2020-01-01,5\n"
"iso1:us#fips:2,2020-01-02,7\n"
"iso1:us#fips:3,2020-01-01,9\n"
"iso1:us#fips:4,2020-01-01,\n"
"iso1:us#fips:1,,100\n"
"iso1:us#fips:2,,\n"
"iso1:us#fips:3,,\n"
"iso1:us#fips:4,,\n"
)
)
mrts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases,new_cases\n"
"iso1:us#fips:1,2020-01-01,0,0\n"
"iso1:us#fips:1,2020-01-02,1,1\n"
"iso1:us#fips:1,2020-01-03,1,0\n"
"iso1:us#fips:2,2020-01-01,5,5\n"
"iso1:us#fips:2,2020-01-02,7,2\n"
"iso1:us#fips:3,2020-01-01,9,9\n"
"iso1:us#fips:4,2020-01-01,,\n"
"iso1:us#fips:1,,100,0.0\n"
"iso1:us#fips:2,,,2.0\n"
"iso1:us#fips:3,,,9.0\n"
"iso1:us#fips:4,,,\n"
)
)
timeseries_after = timeseries.add_new_cases(mrts_before)
test_helpers.assert_dataset_like(mrts_expected, timeseries_after)
def test_new_cases_remove_negative():
mrts_before = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases\n"
"iso1:us#fips:1,2020-01-01,100\n"
"iso1:us#fips:1,2020-01-02,50\n"
"iso1:us#fips:1,2020-01-03,75\n"
"iso1:us#fips:1,2020-01-04,74\n"
"iso1:us#fips:1,,75\n"
)
)
mrts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases,new_cases\n"
"iso1:us#fips:1,2020-01-01,100,100\n"
"iso1:us#fips:1,2020-01-02,50,\n"
"iso1:us#fips:1,2020-01-03,75,25\n"
"iso1:us#fips:1,2020-01-04,74,0\n"
"iso1:us#fips:1,,75,0.0\n"
)
)
timeseries_after = timeseries.add_new_cases(mrts_before)
test_helpers.assert_dataset_like(mrts_expected, timeseries_after)
def test_new_cases_gap_in_date():
mrts_before = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases\n"
"iso1:us#fips:1,2020-01-01,100\n"
"iso1:us#fips:1,2020-01-02,\n"
"iso1:us#fips:1,2020-01-03,110\n"
"iso1:us#fips:1,2020-01-04,130\n"
)
)
mrts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,cases,new_cases\n"
"iso1:us#fips:1,2020-01-01,100,100\n"
"iso1:us#fips:1,2020-01-02,,\n"
"iso1:us#fips:1,2020-01-03,110,\n"
"iso1:us#fips:1,2020-01-04,130,20\n"
)
)
timeseries_after = timeseries.add_new_cases(mrts_before)
test_helpers.assert_dataset_like(mrts_expected, timeseries_after)
def test_timeseries_long():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
expected = pd.read_csv(
io.StringIO(
"location_id,date,variable,value\n"
"iso1:us#cbsa:10100,2020-04-02,m2,2\n"
"iso1:us#cbsa:10100,2020-04-03,m2,3\n"
"iso1:us#fips:97111,2020-04-02,m1,2\n"
"iso1:us#fips:97111,2020-04-04,m1,4\n"
),
parse_dates=[CommonFields.DATE],
dtype={"value": float},
)
long_series = ts._timeseries_long()
assert long_series.index.names == [
CommonFields.LOCATION_ID,
CommonFields.DATE,
PdFields.VARIABLE,
]
assert long_series.name == PdFields.VALUE
long_df = long_series.reset_index()
pd.testing.assert_frame_equal(long_df, expected, check_like=True)
def test_timeseries_wide_dates():
ds = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
ds_wide = ds.timeseries_wide_dates()
assert ds_wide.index.names == [CommonFields.LOCATION_ID, PdFields.VARIABLE]
assert ds_wide.columns.names == [CommonFields.DATE]
expected = (
pd.read_csv(
io.StringIO(
"location_id,variable,2020-04-02,2020-04-03,2020-04-04\n"
"iso1:us#cbsa:10100,m2,2,3,\n"
"iso1:us#fips:97111,m1,2,,4\n"
),
)
.set_index(ds_wide.index.names)
.rename_axis(columns="date")
.astype(float)
)
expected.columns = pd.to_datetime(expected.columns)
pd.testing.assert_frame_equal(ds_wide, expected)
# Recreate the dataset using `from_timeseries_wide_dates_df`.
ds_recreated = timeseries.MultiRegionDataset.from_timeseries_wide_dates_df(
ds_wide
).add_static_values(ds.static.reset_index())
test_helpers.assert_dataset_like(ds, ds_recreated)
def test_timeseries_wide_dates_empty():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
timeseries_wide = ts.timeseries_wide_dates()
assert timeseries_wide.index.names == [CommonFields.LOCATION_ID, PdFields.VARIABLE]
assert timeseries_wide.columns.names == [CommonFields.DATE]
assert timeseries_wide.empty
def test_write_read_wide_dates_csv_compare_literal(tmpdir):
pointer = _make_dataset_pointer(tmpdir)
region_as = Region.from_state("AS")
region_sf = Region.from_fips("06075")
metrics_as = {
CommonFields.ICU_BEDS: TimeseriesLiteral([0, 2, 4], provenance="pt_src1"),
CommonFields.CASES: [100, 200, 300],
}
metrics_sf = {
CommonFields.DEATHS: TimeseriesLiteral([1, 2, None], provenance="pt_src2"),
CommonFields.CASES: [None, 210, 310],
}
dataset_in = test_helpers.build_dataset({region_as: metrics_as, region_sf: metrics_sf})
dataset_in.write_to_dataset_pointer(pointer)
# Compare written file with a string literal so a test fails if something changes in how the
# file is written. The literal contains spaces to align the columns in the source.
assert pointer.path_wide_dates().read_text() == (
" location_id,variable,provenance,2020-04-03,2020-04-02,2020-04-01\n"
" iso1:us#iso2:us-as, cases, , 300, 200, 100\n"
" iso1:us#iso2:us-as,icu_beds, pt_src1, 4, 2, 0\n"
"iso1:us#iso2:us-ca#fips:06075, cases, , 310, 210\n"
"iso1:us#iso2:us-ca#fips:06075, deaths, pt_src2, , 2, 1\n"
).replace(" ", "")
dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)
test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_write_read_wide_dates_csv_with_annotation(tmpdir):
pointer = _make_dataset_pointer(tmpdir)
region = Region.from_state("AS")
metrics = {
CommonFields.ICU_BEDS: TimeseriesLiteral(
[0, 2, 4],
annotation=[
test_helpers.make_tag(date="2020-04-01"),
test_helpers.make_tag(type=TagType.ZSCORE_OUTLIER, date="2020-04-02"),
],
),
CommonFields.CASES: [100, 200, 300],
}
dataset_in = test_helpers.build_dataset({region: metrics})
dataset_in.write_to_dataset_pointer(pointer)
dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)
test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_write_read_dataset_pointer_with_provenance_list(tmpdir):
pointer = _make_dataset_pointer(tmpdir)
dataset_in = test_helpers.build_default_region_dataset(
{
CommonFields.ICU_BEDS: TimeseriesLiteral(
[0, 2, 4],
annotation=[
test_helpers.make_tag(date="2020-04-01"),
test_helpers.make_tag(date="2020-04-02"),
],
provenance=["prov1", "prov2"],
),
CommonFields.CASES: [100, 200, 300],
}
)
dataset_in.write_to_dataset_pointer(pointer)
dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)
test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_timeseries_drop_stale_timeseries_entire_region():
ds_in = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
ds_out = ds_in.drop_stale_timeseries(pd.to_datetime("2020-04-04"))
ds_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
test_helpers.assert_dataset_like(ds_out, ds_expected)
def test_timeseries_drop_stale_timeseries_one_metric():
csv_in = (
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,11,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:10100,,,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
ds_in = timeseries.MultiRegionDataset.from_csv(io.StringIO(csv_in)).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#cbsa:10100,m1,m1-10100prov\n"
"iso1:us#cbsa:10100,m2,m2-10100prov\n"
"iso1:us#fips:97111,m1,m1-97111prov\n"
)
)
ds_out = ds_in.drop_stale_timeseries(pd.to_datetime("2020-04-03"))
# The only timeseries that is stale with cutoff of 4/3 is the CBSA m1. The expected
# dataset is the same as the input with "11" removed from the timeseries and
# corresponding provenance removed.
ds_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(csv_in.replace(",11,", ",,"))
).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#cbsa:10100,m2,m2-10100prov\n"
"iso1:us#fips:97111,m1,m1-97111prov\n"
)
)
test_helpers.assert_dataset_like(ds_out, ds_expected)
def test_timeseries_drop_stale_timeseries_with_tag():
region = Region.from_state("TX")
values_recent = [100, 200, 300, 400]
values_stale = [100, 200, None, None]
ts_recent = TimeseriesLiteral(values_recent, annotation=[test_helpers.make_tag()])
ts_stale = TimeseriesLiteral(values_stale, annotation=[test_helpers.make_tag()])
dataset_in = test_helpers.build_dataset(
{region: {CommonFields.CASES: ts_recent, CommonFields.DEATHS: ts_stale}}
)
dataset_out = dataset_in.drop_stale_timeseries(pd.to_datetime("2020-04-03"))
assert len(dataset_out.tag) == 1
# drop_stale_timeseries preserves the empty DEATHS column so add it to dataset_expected
dataset_expected = test_helpers.build_dataset(
{region: {CommonFields.CASES: ts_recent}}, timeseries_columns=[CommonFields.DEATHS]
)
test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_append_region_and_get_regions_subset_with_tag():
region_tx = Region.from_state("TX")
region_sf = Region.from_fips("06075")
values = [100, 200, 300, 400]
ts_with_tag = TimeseriesLiteral(values, annotation=[test_helpers.make_tag()])
dataset_tx = test_helpers.build_dataset({region_tx: {CommonFields.CASES: ts_with_tag}})
dataset_sf = test_helpers.build_dataset({region_sf: {CommonFields.CASES: ts_with_tag}})
dataset_appended = dataset_tx.append_regions(dataset_sf)
assert len(dataset_appended.tag) == 2
dataset_tx_and_sf = test_helpers.build_dataset(
{region_tx: {CommonFields.CASES: ts_with_tag}, region_sf: {CommonFields.CASES: ts_with_tag}}
)
test_helpers.assert_dataset_like(dataset_appended, dataset_tx_and_sf)
dataset_out = dataset_tx_and_sf.get_regions_subset([region_tx])
assert len(dataset_out.tag) == 1
test_helpers.assert_dataset_like(dataset_out, dataset_tx)
def test_one_region_annotations():
region_tx = Region.from_state("TX")
region_sf = Region.from_fips("06075")
values = [100, 200, 300, 400]
tag1 = test_helpers.make_tag(date="2020-04-01")
tag2a = test_helpers.make_tag(date="2020-04-02")
tag2b = test_helpers.make_tag(date="2020-04-03")
dataset_tx_and_sf = test_helpers.build_dataset(
{
region_tx: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag1]))},
region_sf: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag2a, tag2b]))},
}
)
# get_one_region and iter_one_regions use separate code to split up the tags. Test both of them.
assert dataset_tx_and_sf.get_one_region(region_tx).annotations(CommonFields.CASES) == [tag1]
assert dataset_tx_and_sf.get_one_region(region_sf).annotations(CommonFields.CASES) == [
tag2a,
tag2b,
]
assert {
region: one_region_dataset.annotations(CommonFields.CASES)
for region, one_region_dataset in dataset_tx_and_sf.iter_one_regions()
} == {region_sf: [tag2a, tag2b], region_tx: [tag1],}
def test_timeseries_latest_values():
dataset = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,10,3\n"
"iso1:us#cbsa:10100,2020-04-04,,,,1\n"
"iso1:us#cbsa:10100,,,,,4\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,5,\n"
)
)
# Check bulk access via _timeseries_latest_values
expected = pd.read_csv(
io.StringIO("location_id,m1,m2\n" "iso1:us#cbsa:10100,10,1\n" "iso1:us#fips:97111,4,\n")
)
latest_from_timeseries = dataset._timeseries_latest_values().reset_index()
pd.testing.assert_frame_equal(
latest_from_timeseries, expected, check_like=True, check_dtype=False
)
# Check access to timeseries latests values via get_one_region
region_10100 = dataset.get_one_region(Region.from_cbsa_code("10100"))
assert region_10100.latest == {
"aggregate_level": None,
"county": None,
"m1": 10, # Derived from timeseries
"m2": 4, # Explicitly in recent values
}
region_97111 = dataset.get_one_region(Region.from_fips("97111"))
assert region_97111.latest == {
"aggregate_level": "county",
"county": "Bar County",
"m1": 5,
"m2": None,
}
def test_timeseries_latest_values_copied_to_static():
dataset = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,t1,s1\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,10,3\n"
"iso1:us#cbsa:10100,2020-04-04,,,,1\n"
"iso1:us#cbsa:10100,,,,,4\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,,\n"
)
)
# Check access to latest values as copied to static
t1 = FieldName("t1")
s1 = FieldName("s1")
dataset_t1_latest_in_static = dataset.latest_in_static(t1)
assert dataset_t1_latest_in_static.static.loc["iso1:us#cbsa:10100", t1] == 10
assert dataset_t1_latest_in_static.static.loc["iso1:us#fips:97111", t1] == 4
# Trying to copy the latest values of s1 fails because s1 already has a real value in static.
# See also longer comment where the ValueError is raised.
with pytest.raises(ValueError):
dataset.latest_in_static(s1)
def test_join_columns():
ts_1 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,\n"
"iso1:us#cbsa:10100,2020-04-03,,,\n"
"iso1:us#cbsa:10100,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
"iso1:us#fips:97111,,Bar County,county,4\n"
)
).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#cbsa:10100,m1,ts110100prov\n"
"iso1:us#fips:97111,m1,ts197111prov\n"
)
)
ts_2 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,3\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,\n"
)
).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#cbsa:10100,m2,ts110100prov\n"
"iso1:us#fips:97111,m2,ts197111prov\n"
)
)
ts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,3\n"
"iso1:us#cbsa:10100,,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
).add_provenance_csv(
io.StringIO(
"location_id,variable,provenance\n"
"iso1:us#cbsa:10100,m1,ts110100prov\n"
"iso1:us#cbsa:10100,m2,ts110100prov\n"
"iso1:us#fips:97111,m1,ts197111prov\n"
"iso1:us#fips:97111,m2,ts197111prov\n"
)
)
ts_joined = ts_1.join_columns(ts_2)
test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)
with pytest.raises(NotImplementedError):
ts_2.join_columns(ts_1)
with pytest.raises(ValueError):
# Raises because the same column is in both datasets
ts_2.join_columns(ts_2)
# Checking geo attributes is currently disabled.
# ts_2_variation_df = ts_2.combined_df.copy()
# ts_2_variation_df.loc[
# ts_2_variation_df[CommonFields.COUNTY] == "Bar County", CommonFields.COUNTY
# ] = "Bart County"
# ts_2_variation = timeseries.MultiRegionDataset.from_combined_dataframe(
# ts_2_variation_df
# )
# with pytest.raises(ValueError):
# ts_1.join_columns(ts_2_variation)
def test_join_columns_missing_regions():
ts_1 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,\n"
"iso1:us#cbsa:10100,2020-04-03,,,\n"
"iso1:us#cbsa:10100,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
"iso1:us#fips:97111,,Bar County,county,4\n"
)
)
ts_2 = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m2\n" "iso1:us#cbsa:10100,2020-04-02,,,2\n"
)
)
ts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1,m2\n"
"iso1:us#cbsa:10100,2020-04-02,,,,2\n"
"iso1:us#cbsa:10100,2020-04-03,,,,\n"
"iso1:us#cbsa:10100,,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
"iso1:us#fips:97111,,Bar County,county,4,\n"
)
)
ts_joined = ts_1.join_columns(ts_2)
test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)
def test_iter_one_region():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,\n"
"iso1:us#cbsa:10100,2020-04-03,,,\n"
"iso1:us#cbsa:10100,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
"iso1:us#fips:97111,,Bar County,county,4\n"
# 97222 intentionally has no row of latest data, to make sure iteration still works
"iso1:us#fips:97222,2020-04-02,No Recent County,county,3\n"
"iso1:us#fips:97222,2020-04-04,No Recent County,county,5\n"
)
)
assert {region.location_id for region, _ in ts.iter_one_regions()} == {
"iso1:us#cbsa:10100",
"iso1:us#fips:97111",
"iso1:us#fips:97222",
}
for it_region, it_one_region in ts.iter_one_regions():
one_region = ts.get_one_region(it_region)
assert (one_region.data.fillna("") == it_one_region.data.fillna("")).all(axis=None)
assert one_region.latest == it_one_region.latest
assert one_region.provenance == it_one_region.provenance
assert one_region.region == it_region
assert one_region.region == it_one_region.region
def test_drop_regions_without_population():
# Only regions with location_id containing 1 have population, those with 2 don't
ts_in = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,population,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,,\n"
"iso1:us#cbsa:10100,,,,80000,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,,2\n"
"iso1:us#fips:97111,,Bar County,county,40000,4\n"
"iso1:us#cbsa:20200,2020-04-02,,,,\n"
"iso1:us#cbsa:20200,,,,,\n"
"iso1:us#fips:97222,2020-04-02,Bar County,county,,2\n"
"iso1:us#fips:97222,,Bar County,county,,4\n"
)
)
ts_expected = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,population,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,,\n"
"iso1:us#cbsa:10100,,,,80000,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,,2\n"
"iso1:us#fips:97111,,Bar County,county,40000,4\n"
)
)
with structlog.testing.capture_logs() as logs:
ts_out = timeseries.drop_regions_without_population(
ts_in, ["iso1:us#fips:97222"], structlog.get_logger()
)
test_helpers.assert_dataset_like(ts_out, ts_expected)
assert [l["event"] for l in logs] == ["Dropping unexpected regions without populaton"]
assert [l["location_ids"] for l in logs] == [["iso1:us#cbsa:20200"]]
def test_merge_provenance():
ts = timeseries.MultiRegionDataset.from_csv(
io.StringIO(
"location_id,date,county,aggregate_level,m1\n"
"iso1:us#cbsa:10100,2020-04-02,,,\n"
"iso1:us#cbsa:10100,2020-04-03,,,\n"
"iso1:us#cbsa:10100,,,,\n"
"iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
"iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
"iso1:us#fips:97111,,Bar County,county,4\n"
)
).add_provenance_csv(
io.StringIO("location_id,variable,provenance\n" "iso1:us#cbsa:10100,m1,ts110100prov\n")
)
with pytest.raises(NotImplementedError):
ts.add_provenance_csv(
io.StringIO("location_id,variable,provenance\n" "iso1:us#fips:97111,m1,ts197111prov\n")
)
def test_append_tags():
region_sf = Region.from_fips("06075")
cases_values = [100, 200, 300, 400]
metrics_sf = {
CommonFields.POSITIVE_TESTS: TimeseriesLiteral([1, 2, 3, 4], provenance="pt_src2"),
CommonFields.CASES: cases_values,
}
dataset_in = test_helpers.build_dataset({region_sf: metrics_sf})
tag_sf_cases = test_helpers.make_tag(TagType.CUMULATIVE_TAIL_TRUNCATED, date="2020-04-02")
tag_df = test_helpers.make_tag_df(region_sf, CommonFields.CASES, [tag_sf_cases])
dataset_out = dataset_in.append_tag_df(tag_df)
metrics_sf[CommonFields.CASES] = TimeseriesLiteral(cases_values, annotation=[tag_sf_cases])
dataset_expected = test_helpers.build_dataset({region_sf: metrics_sf})
test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_add_provenance_all_with_tags():
"""Checks that add_provenance_all (and add_provenance_series that it calls) preserves tags."""
region = Region.from_state("TX")
cases_values = [100, 200, 300, 400]
timeseries = TimeseriesLiteral(cases_values, annotation=[(test_helpers.make_tag())])
dataset_in = test_helpers.build_dataset({region: {CommonFields.CASES: timeseries}})
dataset_out = dataset_in.add_provenance_all("prov_prov")
timeseries = dataclasses.replace(timeseries, provenance=["prov_prov"])
dataset_expected = test_helpers.build_dataset({region: {CommonFields.CASES: timeseries}})
test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_join_columns_with_tags():
"""Checks that join_columns preserves tags."""
region = Region.from_state("TX")
cases_values = [100, 200, 300, 400]
ts_lit = TimeseriesLiteral(cases_values, annotation=[test_helpers.make_tag()])
dataset_cases = test_helpers.build_dataset({region: {CommonFields.CASES: ts_lit}})
dataset_deaths = test_helpers.build_dataset({region: {CommonFields.DEATHS: ts_lit}})
dataset_out = dataset_cases.join_columns(dataset_deaths)
assert len(dataset_out.tag) == 2
# The following checks that the tags in `ts_lit` have been preserved.
dataset_expected = test_helpers.build_dataset(
{region: {CommonFields.CASES: ts_lit, CommonFields.DEATHS: ts_lit}}
)
test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_drop_column_with_tags():
"""Checks that join_columns preserves tags."""
region = Region.from_state("TX")
cases_values = [100, 200, 300, 400]
ts_lit = TimeseriesLiteral(cases_values, annotation=[test_helpers.make_tag()])
dataset_in = test_helpers.build_dataset(
{region: {CommonFields.CASES: ts_lit, CommonFields.DEATHS: ts_lit}}
)
dataset_out = dataset_in.drop_column_if_present(CommonFields.DEATHS)
assert len(dataset_out.tag) == 1
dataset_expected = test_helpers.build_dataset({region: {CommonFields.CASES: ts_lit}})
test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_remove_outliers():
values = [10.0] * 7 + [1000.0]
dataset = test_helpers.build_default_region_dataset({CommonFields.NEW_CASES: values})
dataset = timeseries.drop_new_case_outliers(dataset)
# Expected result is the same series with the last value removed
expected_tag = test_helpers.make_tag(
TagType.ZSCORE_OUTLIER, date="2020-04-08", original_observation=1000.0,
)
expected_ts = TimeseriesLiteral([10.0] * 7, annotation=[expected_tag])
expected = test_helpers.build_default_region_dataset({CommonFields.NEW_CASES: expected_ts})
test_helpers.assert_dataset_like(dataset, expected, drop_na_dates=True)
def test_remove_outliers_threshold():
values = [1.0] * 7 + [30.0]
dataset = test_helpers.build_default_region_dataset({CommonFields.NEW_CASES: values})
result = timeseries.drop_new_case_outliers(dataset, case_threshold=30)
# Should not modify because the value is not higher than the threshold
test_helpers.assert_dataset_like(dataset, result)
result = timeseries.drop_new_case_outliers(dataset, case_threshold=29)
# Expected result is the same series with the last value removed
expected_tag = test_helpers.make_tag(
TagType.ZSCORE_OUTLIER, date="2020-04-08", original_observation=30.0
)
expected_ts = TimeseriesLiteral([1.0] * 7, annotation=[expected_tag])
expected = test_helpers.build_default_region_dataset({CommonFields.NEW_CASES: expected_ts})
test_helpers.assert_dataset_like(result, expected, drop_na_dates=True)
def test_not_removing_short_series():
values = [None] * 7 + [1, 1, 300]
dataset = test_helpers.build_default_region_dataset({CommonFields.NEW_CASES: values})
result = timeseries.drop_new_case_outliers(dataset, case_threshold=30)
# Should not modify because the series is too short to be checked for outliers
test_helpers.assert_dataset_like(dataset, result)
def test_timeseries_empty_timeseries_and_static():
# Check that empty dataset creates a MultiRegionDataset
# and that get_one_region raises expected exception.
dataset = timeseries.MultiRegionDataset.new_without_timeseries()
with pytest.raises(timeseries.RegionLatestNotFound):
dataset.get_one_region(Region.from_fips("01001"))
def test_timeseries_empty():
# Check that empty geodata_timeseries_df creates a MultiRegionDataset
# and that get_one_region raises expected exception.
dataset = timeseries.MultiRegionDataset.from_geodata_timeseries_df(
pd.DataFrame([], columns=[CommonFields.LOCATION_ID, CommonFields.DATE])
)
with pytest.raises(timeseries.RegionLatestNotFound):
dataset.get_one_region(Region.from_fips("01001"))
def test_timeseries_empty_static_not_empty():
# Check that empty timeseries does not prevent static data working as expected.
dataset = timeseries.MultiRegionDataset.from_geodata_timeseries_df(
|
pd.DataFrame([], columns=[CommonFields.LOCATION_ID, CommonFields.DATE])
|
pandas.DataFrame
|
import lightgbm as lgbm
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from tqdm import tqdm
X_train = pd.read_pickle("data/train.pkl")
X_test =
|
pd.read_pickle("data/test.pkl")
|
pandas.read_pickle
|
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
try:
import pandas as pd
except ImportError:
pd = None
SurfaceFlingerFrame = collections.namedtuple('SurfaceFlingerFrame', 'desired_present_time actual_present_time frame_ready_time')
GfxInfoFrame = collections.namedtuple('GfxInfoFrame', 'Flags IntendedVsync Vsync OldestInputEvent NewestInputEvent HandleInputStart AnimationStart PerformTraversalsStart DrawStart SyncQueued SyncStart IssueDrawCommandsStart SwapBuffers FrameCompleted')
# https://android.googlesource.com/platform/frameworks/base/+/marshmallow-release/libs/hwui/JankTracker.cpp
# Frames that are exempt from jank metrics.
# First-draw frames, for example, are expected to be slow,
# this is hidden from the user with window animations and other tricks
# Similarly, we don't track direct-drawing via Surface:lockHardwareCanvas() for now
# Android M: WindowLayoutChanged | SurfaceCanvas
GFXINFO_EXEMPT = 1 | 4
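# Editorial note (added for clarity): GFXINFO_EXEMPT is a bitmask of the two flags
# above (1 | 4); a typical use, not shown in this excerpt, would be to skip frames
# for which (frame.Flags & GFXINFO_EXEMPT) != 0 before computing jank statistics.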
VSYNC_INTERVAL = 16666667
class FpsProcessor(object):
"""
Provides common object for processing surfaceFlinger output for frame
statistics.
This processor returns the four frame statistics below:
:FPS: Frames Per Second. This is the frame rate of the workload.
:frame_count: The total number of frames rendered during the execution of
the workload.
:janks: The number of "janks" that occurred during execution of the
workload. Janks are sudden shifts in frame rate. They result
in a "stuttery" UI. See http://jankfree.org/jank-busters-io
:not_at_vsync: The number of frames that did not render in a single
vsync cycle.
"""
def __init__(self, data, action=None, extra_data=None):
"""
data - a pandas.DataFrame object with frame data (e.g. frames.csv)
action - output metrics names with additional action specifier
extra_data - extra data given to use for calculations of metrics
"""
self.data = data
self.action = action
self.extra_data = extra_data
def process(self, refresh_period, drop_threshold): # pylint: disable=too-many-locals
"""
Generate frame per second (fps) and associated metrics for workload.
refresh_period - the vsync interval
drop_threshold - data points below this fps will be dropped
"""
fps = float('nan')
frame_count, janks, not_at_vsync = 0, 0, 0
vsync_interval = refresh_period
per_frame_fps = pd.Series()
# SurfaceFlinger Algorithm
if self.data.columns.tolist() == list(SurfaceFlingerFrame._fields):
# filter out bogus frames.
bogus_frames_filter = self.data.actual_present_time != 0x7fffffffffffffff
actual_present_times = self.data.actual_present_time[bogus_frames_filter]
actual_present_time_deltas = actual_present_times.diff().dropna()
vsyncs_to_compose = actual_present_time_deltas.div(vsync_interval)
vsyncs_to_compose.apply(lambda x: int(round(x, 0)))
# drop values lower than drop_threshold FPS as real in-game frame
# rate is unlikely to drop below that (except on loading screens
# etc, which should not be factored in frame rate calculation).
per_frame_fps = (1.0 / (vsyncs_to_compose.multiply(vsync_interval / 1e9)))
keep_filter = per_frame_fps > drop_threshold
filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
per_frame_fps.name = 'fps'
if not filtered_vsyncs_to_compose.empty:
total_vsyncs = filtered_vsyncs_to_compose.sum()
frame_count = filtered_vsyncs_to_compose.size
if total_vsyncs:
fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
janks = self._calc_janks(filtered_vsyncs_to_compose)
not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
# GfxInfo Algorithm
elif self.data.columns.tolist() == list(GfxInfoFrame._fields):
frame_time = self.data.FrameCompleted - self.data.IntendedVsync
per_frame_fps = (1e9 / frame_time)
keep_filter = per_frame_fps > drop_threshold
per_frame_fps = per_frame_fps[keep_filter]
per_frame_fps.name = 'fps'
frame_count = self.data.index.size
if frame_count:
janks = frame_time[frame_time >= vsync_interval].count()
not_at_vsync = self.data.IntendedVsync - self.data.Vsync
not_at_vsync = not_at_vsync[not_at_vsync != 0].count()
if frame_count > 1:
duration = self.data.Vsync.iloc[-1] - self.data.Vsync.iloc[0]
fps = (1e9 * frame_count) / float(duration)
# If gfxinfocsv is provided, get stats from that instead
if self.extra_data:
series = pd.read_csv(self.extra_data, header=None, index_col=0, squeeze=True)
if not series.empty: # pylint: disable=maybe-no-member
frame_count = series['Total frames rendered']
janks = series['Janky frames']
not_at_vsync = series['Number Missed Vsync']
metrics = (fps, frame_count, janks, not_at_vsync)
return per_frame_fps, metrics
def percentiles(self):
# SurfaceFlinger Algorithm
if self.data.columns.tolist() == list(SurfaceFlingerFrame._fields):
frame_time = self.data.frame_ready_time.diff()
# GfxInfo Algorithm
elif self.data.columns.tolist() == list(GfxInfoFrame._fields):
frame_time = self.data.FrameCompleted - self.data.IntendedVsync
data = frame_time.dropna().quantile([0.90, 0.95, 0.99])
# Convert to ms, round to nearest, cast to int
data = data.div(1e6).round()
try:
data = data.astype('int')
except ValueError:
pass
# If gfxinfocsv is provided, get stats from that instead
if self.extra_data:
series =
|
pd.read_csv(self.extra_data, header=None, index_col=0, squeeze=True)
|
pandas.read_csv
|
from IMLearn import BaseEstimator
from challenge.agoda_cancellation_estimator import *
from challenge.agoda_cancellation_preprocessor import \
AgodaCancellationPreprocessor
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
WEEK = 10
def load_data(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
full_data = pd.read_csv(filename).drop_duplicates()
full_data = full_data.drop(
full_data[full_data["cancellation_policy_code"] == "UNKNOWN"].index)
full_data["cancellation_datetime"].fillna(0, inplace=True)
return full_data.dropna()
def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray,
filename: str):
"""
Export to specified file the prediction results of given estimator on given testset.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
Fitted estimator to use for prediction
X: ndarray of shape (n_samples, n_features)
Test design matrix to predict its responses
filename:
path to store file at
"""
prediction = estimator.predict(X)
pd.DataFrame(prediction.astype(int),
columns=["predicted_values"]).to_csv(filename, index=False)
print(np.unique(prediction, return_counts=True))
def fill_missing_columns(design_matrix, test_set):
missing_cols = set(design_matrix.columns) - set(test_set.columns)
for c in missing_cols:
test_set[c] = 0
return test_set[design_matrix.columns]
if __name__ == '__main__':
np.random.seed(0)
# Load and preprocess data
full_data = load_data(
"../datasets/agoda_cancellation_train.csv")
p = AgodaCancellationPreprocessor(full_data)
base_design_matrix, week_specific = p.preprocess(full_data)
cancellation_labels_list = p.preprocess_labels(
full_data.cancellation_datetime,
full_data.booking_datetime)
design_matrix = pd.DataFrame()
cancellation_labels = pd.DataFrame()
for i in range(len(week_specific)):
    # pd.concat returns new objects, so the results must be assigned back
    design_matrix = pd.concat(
        [design_matrix, pd.concat([base_design_matrix, week_specific[i]], axis=1)])
    cancellation_labels = pd.concat(
        [cancellation_labels, cancellation_labels_list[i]])
for i in range(1, WEEK):
week_data =
|
pd.read_csv(f"week_{i}_test_data.csv")
|
pandas.read_csv
|
import math
import warnings
from copy import copy
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from typing_extensions import Literal
from etna.loggers import tslogger
if TYPE_CHECKING:
from etna.transforms.base import Transform
TTimestamp = Union[str, pd.Timestamp]
class TSDataset:
"""TSDataset is the main class to handle your time series data.
It prepares the series for exploration and analysis, implements feature generation with Transforms,
and generates future points.
Notes
-----
TSDataset supports a custom indexing and slicing method.
It may be done through this interface: ``TSDataset[timestamp, segment, column]``
If the dataset contains NaNs at the start of the period, those timestamps will be removed.
During creation, the segment level is cast to string type.
Examples
--------
>>> from etna.datasets import generate_const_df
>>> df = generate_const_df(periods=30, start_time="2021-06-01", n_segments=2, scale=1)
>>> df_ts_format = TSDataset.to_dataset(df)
>>> ts = TSDataset(df_ts_format, "D")
>>> ts["2021-06-01":"2021-06-07", "segment_0", "target"]
timestamp
2021-06-01 1.0
2021-06-02 1.0
2021-06-03 1.0
2021-06-04 1.0
2021-06-05 1.0
2021-06-06 1.0
2021-06-07 1.0
Freq: D, Name: (segment_0, target), dtype: float64
>>> from etna.datasets import generate_ar_df
>>> pd.options.display.float_format = '{:,.2f}'.format
>>> df_to_forecast = generate_ar_df(100, start_time="2021-01-01", n_segments=1)
>>> df_regressors = generate_ar_df(120, start_time="2021-01-01", n_segments=5)
>>> df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
>>> df_regressors.columns = ["timestamp"] + [f"regressor_{i}" for i in range(5)]
>>> df_regressors["segment"] = "segment_0"
>>> df_to_forecast = TSDataset.to_dataset(df_to_forecast)
>>> df_regressors = TSDataset.to_dataset(df_regressors)
>>> tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors, known_future="all")
>>> tsdataset.df.head(5)
segment segment_0
feature regressor_0 regressor_1 regressor_2 regressor_3 regressor_4 target
timestamp
2021-01-01 1.62 -0.02 -0.50 -0.56 0.52 1.62
2021-01-02 1.01 -0.80 -0.81 0.38 -0.60 1.01
2021-01-03 0.48 0.47 -0.81 -1.56 -1.37 0.48
2021-01-04 -0.59 2.44 -2.21 -1.21 -0.69 -0.59
2021-01-05 0.28 0.58 -3.07 -1.45 0.77 0.28
"""
idx = pd.IndexSlice
def __init__(
self,
df: pd.DataFrame,
freq: str,
df_exog: Optional[pd.DataFrame] = None,
known_future: Union[Literal["all"], Sequence] = (),
):
"""Init TSDataset.
Parameters
----------
df:
dataframe with timeseries
freq:
frequency of timestamp in df
df_exog:
dataframe with exogenous data;
known_future:
columns in ``df_exog[known_future]`` that are regressors,
if "all" value is given, all columns are meant to be regressors
"""
self.raw_df = self._prepare_df(df)
self.raw_df.index = pd.to_datetime(self.raw_df.index)
self.freq = freq
self.df_exog = None
try:
inferred_freq = pd.infer_freq(self.raw_df.index)
except ValueError:
warnings.warn("TSDataset freq can't be inferred")
inferred_freq = None
if inferred_freq != self.freq:
warnings.warn(
f"You probably set wrong freq. Discovered freq in you data is {inferred_freq}, you set {self.freq}"
)
self.raw_df = self.raw_df.asfreq(self.freq)
self.df = self.raw_df.copy(deep=True)
self.known_future = self._check_known_future(known_future, df_exog)
self._regressors = copy(self.known_future)
if df_exog is not None:
self.df_exog = df_exog.copy(deep=True)
self.df_exog.index = pd.to_datetime(self.df_exog.index)
self.df = self._merge_exog(self.df)
self.transforms: Optional[Sequence["Transform"]] = None
def transform(self, transforms: Sequence["Transform"]):
"""Apply given transform to the data."""
self._check_endings(warning=True)
self.transforms = transforms
for transform in self.transforms:
tslogger.log(f"Transform {repr(transform)} is applied to dataset")
columns_before = set(self.columns.get_level_values("feature"))
self.df = transform.transform(self.df)
columns_after = set(self.columns.get_level_values("feature"))
self._update_regressors(transform=transform, columns_before=columns_before, columns_after=columns_after)
def fit_transform(self, transforms: Sequence["Transform"]):
"""Fit and apply given transforms to the data."""
self._check_endings(warning=True)
self.transforms = transforms
for transform in self.transforms:
tslogger.log(f"Transform {repr(transform)} is applied to dataset")
columns_before = set(self.columns.get_level_values("feature"))
self.df = transform.fit_transform(self.df)
columns_after = set(self.columns.get_level_values("feature"))
self._update_regressors(transform=transform, columns_before=columns_before, columns_after=columns_after)
@staticmethod
def _prepare_df(df: pd.DataFrame) -> pd.DataFrame:
# cast segment to str type
df_copy = df.copy(deep=True)
columns_frame = df.columns.to_frame()
columns_frame["segment"] = columns_frame["segment"].astype(str)
df_copy.columns =
|
pd.MultiIndex.from_frame(columns_frame)
|
pandas.MultiIndex.from_frame
|
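# Hedged illustration of the _prepare_df step above: the wide frame keeps a
# (segment, feature) MultiIndex on its columns, and pd.MultiIndex.from_frame
# rebuilds it after the segment level is cast to str. The tiny frame below is
# made up for illustration only.
import pandas as pd

cols = pd.MultiIndex.from_product([[0, 1], ["target"]], names=["segment", "feature"])
df = pd.DataFrame([[1.0, 2.0]], columns=cols)

columns_frame = df.columns.to_frame()
columns_frame["segment"] = columns_frame["segment"].astype(str)  # 0 -> "0", 1 -> "1"
df.columns = pd.MultiIndex.from_frame(columns_frame)             # same columns, string segments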
# -*- coding: utf-8 -*-
import pickle
import click
import logging
from pathlib import Path
import os.path
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
project_dir = Path(__file__).resolve().parents[2]
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
@click.option('--pca-var', default=0.95)
@click.option('--pca-output', type=click.Path(), default=os.path.join(project_dir, 'models/pca.p'))
@click.option('--colnames-output', type=click.Path(), default=os.path.join(project_dir, 'models/colnames.p'))
def main(input_filepath, output_filepath, pca_var, pca_output, colnames_output):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
# read raw dataset
logger.info('reading dataset at {}'.format(input_filepath))
df =
|
pd.read_csv(input_filepath)
|
pandas.read_csv
|
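# A sketch of what the step controlled by --pca-var above could look like; the
# project's actual column handling is not shown in this record, so the frame
# and file name here are assumptions. With a float n_components, scikit-learn
# keeps just enough components to explain that fraction of variance.
import pickle
import pandas as pd
from sklearn.decomposition import PCA

df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0],
                   "b": [2.1, 3.9, 6.2, 8.1],
                   "c": [0.0, 0.1, 0.0, 0.1]})
pca = PCA(n_components=0.95)
reduced = pca.fit_transform(df.values)

with open("pca.p", "wb") as fh:  # mirrors the default --pca-output path
    pickle.dump(pca, fh)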
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 15:51:36 2018
@author: huangjin
"""
import pandas as pd
from tqdm import tqdm
import os
def gen_data(df, time_start, time_end):
df = df.sort_values(by=['code','pt'])
df = df[(df['pt']<=time_end)&(df['pt']>=time_start)]
col = [c for c in df.columns if c not in ['code','pt']]
df_tem = df.groupby(['code']).shift(1).fillna(0)
all_data = df[['code','pt']]
for j in tqdm(range(len(col))):
tem = df[col[j]]-df_tem[col[j]]
all_data = pd.concat([all_data, tem], axis=1)
return all_data
def process_data():
industry_name = ['非银金融','纺织服装','有色金属','计算机','交通运输','医药生物','钢铁','家用电器',
'采掘','国防军工','房地产','建筑材料','休闲服务','综合','建筑装饰','银行',
'轻工制造','化工','电子','机械设备','商业贸易','通信','电气设备','公用事业','传媒',
'农林牧渔','食品饮料','汽车']
industry_name_english = ['Non bank finance', 'textile and clothing', 'non-ferrous metals',
'computer', 'transportation', 'medical biology', 'steel',
'household appliances','Excavation','Defense Force',
'Real Estate', 'Building Materials', 'Leisure Services',
'Comprehensive', 'Architectural Decoration', 'Bank',
'Light manufacturing', 'Chemical', 'Electronic', 'Mechanical equipment',
'Commercial trade', 'Communication', 'Electrical equipment', 'Utilities',
'Media','Agriculture and fishing', 'food and beverage', 'car']
for industry_name_i in range(len(industry_name)):
# market values
market_value = pd.read_csv('market_values_end.csv')
stocks_info =
|
pd.read_csv('stocks_info.csv')
|
pandas.read_csv
|
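# Side note on gen_data above: subtracting groupby('code').shift(1) from the
# original columns is a per-group first difference, which groupby(...).diff()
# computes directly (apart from the fillna(0) applied to the shifted frame).
# Toy data, for illustration only.
import pandas as pd

df = pd.DataFrame({"code": ["A", "A", "B", "B"],
                   "pt":   [1, 2, 1, 2],
                   "rev":  [10.0, 12.0, 5.0, 9.0]})
diffed = df.groupby("code")["rev"].diff()  # NaN on each group's first row, else rev - previous rev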
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from GUI import Ui_MainWindow # generated GUI py file
import sys
import os
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from datetime import datetime
import ctypes
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pwlf
from GPyOpt.methods import BayesianOptimization
import openpyxl
import math
from scipy import stats
# python included dependencies: datetime, ctypes, math, os, sys
# installed package dependencies: dateutil, gpy, matplotlib, numpy, openpyxl (and image), pandas (and xlsxwriter), pwlf, pyqt, scipi
# class to populate a PyQT table view with a pandas dataframe
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
def rowCount(self, parent=None):
return self._data.shape[0]
def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
return str(self._data.iloc[index.row(), index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self._data.columns[col]
return None
# class to handle threading of datapoints so GUI is responsive
class DataPointsWorkThread(QThread):
signal = pyqtSignal('PyQt_PyObject')
signal_pb = pyqtSignal('PyQt_PyObject')
def __init__(self, data, start_date, end_date, pb_inc, option):
QThread.__init__(self)
# create instance of WorkerThread class and pass variables from application class as instance variables
self.data = data
self.start_date = start_date
self.end_date = end_date
self.pb_inc = pb_inc
self.option = option
def run(self):
# local variables from instance variables for reference convenience
data = self.data
start_date = self.start_date
end_date = self.end_date
pb_inc = self.pb_inc
option = self.option
# initialize datapoints data frame and progress bar
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
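# Hedged usage sketch for the PandasModel class above: wrap a DataFrame and set
# it on a QTableView. The standalone view here is illustrative; in the real
# application the table widget comes from the generated Ui_MainWindow.
import sys
import pandas as pd
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
view = QtWidgets.QTableView()
view.setModel(PandasModel(pd.DataFrame({"x": [1, 2], "y": [3, 4]})))
view.show()
# app.exec_() would start the event loop; omitted in this sketch.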
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
|
nv.validate_window_func("max", args, kwargs)
|
pandas.compat.numpy.function.validate_window_func
|
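# Small illustration of the `raw` flag documented in _shared_docs["apply"]
# above: with raw=True each window is passed to the function as an ndarray,
# with raw=False as a Series. This is a usage sketch only, independent of the
# internal _apply machinery in this module.
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
spread_raw = s.rolling(3).apply(lambda x: x.max() - x.min(), raw=True)        # x is a numpy array
spread_ser = s.rolling(3).apply(lambda x: x.iloc[-1] - x.iloc[0], raw=False)  # x is a Series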
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
import asyncio
import pandas as pd # type: ignore
import pyEX # type: ignore
from collections import deque
from datetime import datetime, timedelta
from tqdm import tqdm # type: ignore
from aat.exchange import Exchange
from aat.config import InstrumentType, EventType, Side, TradingType
from aat.core import ExchangeType, Instrument, Event, Trade, Order
_iex_instrument_types = {
'ad': InstrumentType.EQUITY, # ad - ADR
'gdr': InstrumentType.EQUITY, # gdr - GDR
're': InstrumentType.OTHER, # re - REIT
'ce': InstrumentType.MUTUALFUND, # ce - Closed end fund
'si': InstrumentType.EQUITY, # si - Secondary Issue
'lp': InstrumentType.OTHER, # lp - Limited Partnerships
'cs': InstrumentType.EQUITY, # cs - Common Stock
'et': InstrumentType.EQUITY, # et - ETF
'wt': InstrumentType.OTHER, # wt - Warrant
'rt': InstrumentType.OTHER, # rt – Right
'oef': InstrumentType.MUTUALFUND, # oef - Open Ended Fund
'cef': InstrumentType.MUTUALFUND, # cef - Closed Ended Fund
'ps': InstrumentType.EQUITY, # ps - Preferred Stock
'ut': InstrumentType.OTHER, # ut - Unit
'struct': InstrumentType.OTHER, # struct - Structured Product
}
class IEX(Exchange):
'''Investor's Exchange'''
def __init__(self, trading_type, verbose, api_key, is_sandbox, timeframe='1y', start_date=None, end_date=None):
super().__init__(ExchangeType('iex'))
self._trading_type = trading_type
self._verbose = verbose
self._api_key = api_key
self._is_sandbox = is_sandbox
if trading_type == TradingType.LIVE:
assert not is_sandbox
self._timeframe = timeframe
if timeframe == 'live':
assert trading_type != TradingType.BACKTEST
if timeframe == '1d':
# intraday testing
# TODO if today is weekend/holiday, pick last day with data
self._start_date = datetime.strptime(start_date, '%Y%m%d') if start_date else datetime.today()
self._end_date = datetime.strptime(end_date, '%Y%m%d') if end_date else datetime.today()
self._subscriptions = []
# "Order" management
self._queued_orders = deque()
self._order_id = 1
# *************** #
# General methods #
# *************** #
async def connect(self):
'''connect to exchange. should be asynchronous.
For OrderEntry-only, can just return None
'''
self._client = pyEX.Client(self._api_key, 'sandbox' if self._is_sandbox else 'stable')
# ******************* #
# Market Data Methods #
# ******************* #
async def instruments(self):
'''get list of available instruments'''
instruments = []
symbols = self._client.symbols()
for record in symbols:
if not record['isEnabled'] or not record['type'] or record['type'] == 'temp':
continue
symbol = record['symbol']
brokerExchange = record['exchange']
type = _iex_instrument_types[record['type']]
currency = Instrument(type=InstrumentType.CURRENCY, name=record['currency'])
try:
inst = Instrument(name=symbol, type=type, exchange=self.exchange(), brokerExchange=brokerExchange, currency=currency)
except AssertionError:
# Happens sometimes on sandbox
continue
instruments.append(inst)
return instruments
async def subscribe(self, instrument):
self._subscriptions.append(instrument)
async def tick(self):
'''return data from exchange'''
if self._timeframe == 'live':
data = deque()
def _callback(record):
data.append(record)
self._client.tradesSSE(symbols=",".join([i.name for i in self._subscriptions]),
on_data=_callback)
while True:
while data:
record = data.popleft()
volume = record['volume']
price = record['price']
instrument = Instrument(record['symbol'], InstrumentType.EQUITY)
o = Order(volume=volume, price=price, side=Side.BUY, instrument=instrument, exchange=self.exchange())
t = Trade(volume=volume, price=price, taker_order=o, maker_orders=[])
yield Event(type=EventType.TRADE, target=t)
await asyncio.sleep(0)
else:
dfs = []
if self._timeframe != '1d':
for i in tqdm(self._subscriptions, desc="Fetching data..."):
df = self._client.chartDF(i.name, timeframe=self._timeframe)
df = df[['close', 'volume']]
df.columns = ['close:{}'.format(i.name), 'volume:{}'.format(i.name)]
dfs.append(df)
data = pd.concat(dfs, axis=1)
data.sort_index(inplace=True)
data = data.groupby(data.index).last()
data.drop_duplicates(inplace=True)
data.fillna(method='ffill', inplace=True)
else:
for i in tqdm(self._subscriptions, desc="Fetching data..."):
date = self._start_date
subdfs = []
while date <= self._end_date:
df = self._client.chartDF(i.name, timeframe='1d', date=date.strftime('%Y%m%d'))
if not df.empty:
df = df[['average', 'volume']]
df.columns = ['close:{}'.format(i.name), 'volume:{}'.format(i.name)]
subdfs.append(df)
date += timedelta(days=1)
dfs.append(
|
pd.concat(subdfs)
|
pandas.concat
|
# Create the preprocessor instance required for custom prediction on GCP AI Platform, requires the df.csv file generated by the jupyter notebook script
import pandas as pd
import numpy as np
import pickle
import preprocess
if __name__ == '__main__':
df =
|
pd.read_csv('df.csv', index_col=0)
|
pandas.read_csv
|
"""
Provide convenient functions to get FRED economic data.
Series can be found here:
https://fred.stlouisfed.org/
"""
from pilates import data_module
import pandas as pd
import numpy as np
from fredapi import Fred
FRED_API_KEY = '69e42ccbb7fa5da6cc743e564d08be62'
class fred(data_module):
def __init__(self, w):
data_module.__init__(self, w)
#######################################
# Make available some fredapi methods #
#######################################
def get_series_info(self, series):
self.fred = Fred(api_key = FRED_API_KEY)
return self.fred.get_series_info(series)
##########################################
# Additional generic methods for pilates #
##########################################
def get_series_for_data(self, data, series, col_date=None, tolerance=None):
if col_date is None:
# Use the date set at the library level
col_date = self.d.col_date
if tolerance is None:
# Series is given as a name
fred_series = self.fred.get_series(series)
# Get series information (frequency, etc)
df_fred_info = self.fred.get_series_info(series)
freq = df_fred_info['frequency_short']
if freq == 'A':
tolerance = pd.Timedelta('370 day')
elif freq == 'Q':
tolerance = pd.Timedelta('100 day')
elif freq == 'M':
tolerance = pd.Timedelta('40 day')
elif freq == 'D':
tolerance = pd.Timedelta('2 day')
else:
# Series is given as data
fred_series = series
series = 'series_name'
df = pd.DataFrame(fred_series).reset_index()
df.columns = ['date_fred', series]
# Prepare the user data
data[col_date] = pd.to_datetime(data[col_date])
# Find nearest dates
data = data.sort_values(col_date)
df = df.sort_values('date_fred')
data = data[np.isfinite(data[col_date])]
dfin = pd.merge_asof(data[col_date], df, left_on=col_date, right_on='date_fred',
direction='nearest',
tolerance=tolerance)
dfin.index = data.index
return dfin[series].astype('float32')
def get_series(self, series, data=None, col_date=None):
""" Return the FRED series to be added to the user data.
"""
# Connection to FRED (connect just before downloading series
# to avoid Error 504: Gateway Time-out)
self.fred = Fred(api_key = FRED_API_KEY)
if data is None:
return self.fred.get_series(series)
if col_date is None:
# Use the date set at the library level
col_date = self.d.col_date
dfin = data[[col_date]]
for s in series:
dfin[s] = self.get_series_for_data(dfin, s, col_date)
return dfin[series]
# Deprecated
def __get_10y_US_rates(self, data, col_date=None):
""" Return the 10 years treasury rates.
Deprecated. Use get_series().
"""
if col_date is None:
col_date = self.d.col_date
df_fred = self.fred.get_series('DGS10')
df =
|
pd.DataFrame(df_fred)
|
pandas.DataFrame
|
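# Sketch of the nearest-date alignment used in get_series_for_data above:
# pd.merge_asof matches each user date to the closest FRED observation within
# the tolerance. Both inputs must be sorted on their keys. The frames below
# are invented for illustration.
import pandas as pd

user = pd.DataFrame({"date": pd.to_datetime(["2020-01-10", "2020-04-02"])}).sort_values("date")
fred_obs = pd.DataFrame({"date_fred": pd.to_datetime(["2020-01-01", "2020-04-01"]),
                         "DGS10": [1.88, 0.62]}).sort_values("date_fred")

aligned = pd.merge_asof(user, fred_obs,
                        left_on="date", right_on="date_fred",
                        direction="nearest", tolerance=pd.Timedelta("40 day"))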
"""
This script is the ETL pipeline to create
'people' file and 'aquisition_facts'
"""
import pandas as pd
# Save links to csv files
url1 = 'https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons.csv'
url2 = 'https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email.csv'
url3 = 'https://als-hiring.s3.amazonaws.com/fake_data/2020-07-01_17%3A11%3A00/cons_email_chapter_subscription.csv'
# Load csv files into Pandas dataframes
cons =
|
pd.read_csv(url1)
|
pandas.read_csv
|
import matplotlib.pyplot as plt
import cantools
import pandas as pd
import cv2
import numpy as np
import os
LOG_FOLDER = "/media/andrei/Samsung_T51/nemodrive_data/18_nov/session_1/1542549716_log/"
CAN_FILE_PATH = os.path.join(LOG_FOLDER, "can_raw.log")
DBC_FILE = "logan.dbc"
SPEED_CAN_ID = "354"
OBD_SPEED_FILE = LOG_FOLDER + "obd_SPEED.log"
CAMERA_FILE_PREFIX = LOG_FOLDER + "camera_0"
def read_can_file(can_file_path):
df_can = pd.read_csv(can_file_path, header=None, delimiter=" ")
df_can["tp"] = df_can[0].apply(lambda x: float(x[1:-1]))
df_can["can_id"] = df_can[2].apply(lambda x: x[:x.find("#")])
df_can["data_str"] = df_can[2].apply(lambda x: x[x.find("#") + 1:])
return df_can
def get_can_data(db, cmd, data, msg):
decoded_info = db.decode_message(cmd, bytearray.fromhex(msg))
return decoded_info[data]
# =======================================================================
# -- Load DBC file stuff
db = cantools.database.load_file(DBC_FILE, strict=False)
cmd_names = [
("SPEED_SENSOR", "SPEED_KPS"),
("STEERING_SENSORS", "STEER_ANGLE"),
("BRAKE_SENSOR", "PRESIUNE_C_P")
]
cmd_idx = 0
cmd_name = cmd_names[cmd_idx][0]
data_name = cmd_names[cmd_idx][1]
"""
Decode values using:
Define: cmd_name, data_name, raw_can_msg
decoded_info = db.decode_message(cmd_name, bytearray.fromhex(raw_can_msg))
print(decoded_info[data_name])
ffmpeg -f v4l2 -video_size {}x{} -i /dev/video{} -c copy {}.mkv
"""
# =======================================================================
# -- Load Raw can
df_can = read_can_file(CAN_FILE_PATH)
# =======================================================================
# -- Load speed command
df_can_speed = df_can[df_can["can_id"] == SPEED_CAN_ID]
df_can_speed["speed"] = df_can_speed["data_str"].apply(lambda x: get_can_data(db, cmd_name,
data_name, x))
df_can_speed[df_can_speed.speed > 0]
df_can_speed["pts"] = df_can_speed["tp"] - 1539434950.220346
plt.plot(df_can_speed["tp"].values, df_can_speed["speed"])
plt.show()
# Write to csv
speed_file = os.path.join(LOG_FOLDER, "speed.csv")
df_can_speed.to_csv(speed_file)
# =======================================================================
# -- Load steer command
STEER_CMD_NAME = "STEERING_SENSORS"
STEER_CMD_DATA_NAME = "STEER_ANGLE"
STEERING_CAN_ID = "0C6"
df_can_steer = df_can[df_can["can_id"] == STEERING_CAN_ID]
df_can_steer["steering"] = df_can_steer["data_str"].apply(lambda x: get_can_data(db,
STEER_CMD_NAME,
STEER_CMD_DATA_NAME, x))
# Write to csv
steer_file = os.path.join(LOG_FOLDER, "steer.csv")
df_can_steer.to_csv(steer_file)
# --Plot can data
plt.plot(df_can_steer["tp"].values, df_can_steer["steering"])
plt.show()
steering_values = []
rng = 100
for index in range(rng, len(df_can_steer)):
x = df_can_steer.iloc[index-rng: index+1]["steering"].values
steering_values.append(np.abs(x[1:] - x[:-1]).sum())
steering_values_df = pd.Series(steering_values[:36494], name="Steering angle per second")
steering_values_df.describe()
# steering_values_df.plot()
steering_values_df.plot(kind="box")
plt.show()
# =======================================================================
# -- speed file
df_speed = pd.read_csv(OBD_SPEED_FILE, header=None)
df_speed["value"] = df_speed[1].apply(lambda x: None if x is None else x.split()[0])
df_speed["value"] = df_speed["value"].apply(lambda x: None if x == "None" else float(x))
df_speed.set_index(0, inplace=True)
no_unique_val = df_speed["value"].nunique()
# ==================================================================================================
# --Plot can data
plt.plot(df_can_speed["tp"].values, df_can_speed["speed"])
# Plot
plt.plot(df_speed.index, df_speed["value"].values)
plt.show()
# ==================================================================================================
# -- CAMERA processing
camera_start_tp = None
with open(CAMERA_FILE_PREFIX + "_timestamp") as file:
data = file.read()
camera_start_tp = float(data)
camera_start_tp = 1539434950.130855 - 35.4
pts_file =
|
pd.read_csv(CAMERA_FILE_PREFIX + "_pts.log", header=None)
|
pandas.read_csv
|
"""Multiple Factor Analysis (MFA)"""
import itertools
from matplotlib import markers
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import utils
from . import mca
from . import pca
from . import plot
class MFA(pca.PCA):
def __init__(self, groups=None, normalize=True, n_components=2, n_iter=10,
copy=True, check_input=True, random_state=None, engine='auto'):
super().__init__(
rescale_with_mean=False,
rescale_with_std=False,
n_components=n_components,
n_iter=n_iter,
copy=copy,
check_input=check_input,
random_state=random_state,
engine=engine
)
self.groups = groups
self.normalize = normalize
def fit(self, X, y=None):
# Checks groups are provided
if self.groups is None:
raise ValueError('Groups have to be specified')
# Check input
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
# Prepare input
X = self._prepare_input(X)
# Check group types are consistent
self.all_nums_ = {}
for name, cols in sorted(self.groups.items()):
all_num = all(pd.api.types.is_numeric_dtype(X[c]) for c in cols)
all_cat = all(
|
pd.api.types.is_string_dtype(X[c])
|
pandas.api.types.is_string_dtype
|
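# A minimal sketch of the per-group dtype check MFA.fit performs, using pandas'
# public dtype predicates on a toy frame (column names are illustrative).
import pandas as pd

X = pd.DataFrame({"height": [1.7, 1.8], "eye": ["blue", "brown"]})
cols = ["height", "eye"]
all_num = all(pd.api.types.is_numeric_dtype(X[c]) for c in cols)
all_cat = all(pd.api.types.is_string_dtype(X[c]) for c in cols)
print(all_num, all_cat)  # False False: a mixed group is neither all numeric nor all categorical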
# -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class TestIndexCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def test_setitem_index_numeric_coercion_int(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.index.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[5] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 5]))
self.assertEqual(temp.index.dtype, np.int64)
# int + float -> float
temp = s.copy()
temp[1.1] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 1.1]))
self.assertEqual(temp.index.dtype, np.float64)
def test_setitem_index_numeric_coercion_float(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(s.index.dtype, np.float64)
# float + int -> int
temp = s.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
temp = s.copy()
temp[5.1] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
tm.assert_series_equal(temp, exp)
self.assertEqual(temp.index.dtype, np.float64)
def test_insert_numeric_coercion_int(self):
idx = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.int64)
# int + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
self.assertEqual(res.dtype, np.float64)
# int + bool -> int
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
def test_insert_numeric_coercion_float(self):
idx = pd.Float64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.float64)
# float + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1., 1., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1., 1.1, 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + bool -> float
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1., 0., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
class TestSeriesCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
def test_setitem_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 3, 4]))
self.assertEqual(temp.dtype, np.complex128)
# int + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
def test_setitem_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
# float + int -> float
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + float -> float
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1.1, 1.1, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1.1, 1 + 1j, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.complex128)
# float + bool -> float
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
def test_setitem_numeric_coercion_complex(self):
s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(s.dtype, np.complex128)
# complex + int -> complex
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + float -> complex
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp,
pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + bool -> complex
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
def test_setitem_numeric_coercion_bool(self):
s = pd.Series([True, False, True, False])
self.assertEqual(s.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 1
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 3 # greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp,
|
pd.Series([True, True, True, False])
|
pandas.Series
|
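# A minimal sketch of the upcasting behaviour those tests pin down, written
# against the plain Index constructor (Int64Index/Float64Index are deprecated
# in recent pandas).
import pandas as pd

idx = pd.Index([1, 2, 3, 4])
print(idx.insert(1, 1.1).dtype)   # float64: inserting a float upcasts an int index

s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
s[5.1] = 5                        # enlarging with a float label keeps a float64 index
print(s.index.dtype)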
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 4 13:05:41 2022
@author: maxime
"""
import vectorbtpro as vbt
import core.indicators as ic
from core.strat import Strat
from core.bt import BT, WQ
import numpy as np
import pandas as pd
### To backtest on recent data
class StratLIVE(Strat):
def __init__(self,symbols,period,index_symbol):
self.period=period
self.symbols=symbols
self.index_symbol=index_symbol
self.retrieve_live()
## to plot the last few days, for instance
def retrieve_live(self):
all_symbols=self.symbols+[self.index_symbol]
cours=vbt.YFData.fetch(all_symbols, period=self.period,missing_index='drop')
cours_action=cours.select(self.symbols)
self.open =cours_action.get('Open')
self.high=cours_action.get('High')
self.low=cours_action.get('Low')
self.close=cours_action.get('Close')
self.volume=cours_action.get('Volume')
print("number of days retrieved: " + str(np.shape(self.close)[0]))
# assuming all stocks trade on the same exchange here:
#cours_ind=vbt.YFData.fetch(index_symbol, period=period,missing_index='drop',**kwargs)
cours_index=cours.select(self.index_symbol)
self.open_ind =cours_index.get('Open')
self.high_ind=cours_index.get('High')
self.low_ind=cours_index.get('Low')
self.close_ind=cours_index.get('Close')
self.volume_ind=cours_index.get('Volume')
class btLIVE(BT):
def __init__(self,longshort,**kwargs):
st=kwargs.get("st")
self.high=st.high
self.low=st.low
self.close=st.close
self.open=st.open
self.volume=st.volume
self.high_ind=st.high_ind
self.low_ind=st.low_ind
self.close_ind=st.close_ind
self.open_ind=st.open_ind
self.volume_ind=st.volume_ind
self.symbols=self.close.columns.values
self.start_capital=10000
self.order_size=self.start_capital
self.capital=self.start_capital
self.longshort=longshort
self.entries=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
self.exits=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
self.exits_short=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
self.entries_short=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
self.pf=[]
self.pf_short=[]
st.strat_kama_stoch_matrend_bbands()
self.ent11=st.entries
self.ex11=st.exits
self.vol=ic.VBTNATR.run(self.high,self.low,self.close).natr
self.excluded=[]
self.hold_dur=0
self.candidates=[[] for ii in range(len(self.close))]
self.candidates_short=[[] for ii in range(len(self.close))]
self.symbols_simple=self.close.columns.values
self.symbols_complex=self.ent11.columns.values
self.last_order_dir="long"
class WQLIVE(WQ):
def __init__(self, nb,**kwargs):
st=kwargs.get("st")
self.high=st.high
self.low=st.low
self.close=st.close
self.open=st.open
self.volume=st.volume
self.high_ind=st.high_ind
self.low_ind=st.low_ind
self.close_ind=st.close_ind
self.open_ind=st.open_ind
self.volume_ind=st.volume_ind
self.candidates=[[] for ii in range(len(self.close))]
self.pf=[]
self.entries=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
self.exits=pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
# actually only long positions are used in these strategies
self.exits_short=
|
pd.DataFrame.vbt.empty_like(self.close, fill_value=False)
|
pandas.DataFrame.vbt.empty_like
|
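# A minimal sketch of what the vbt empty_like calls above produce, rebuilt with
# plain pandas: an all-False signal frame shaped like the close prices (toy data,
# illustrative only).
import pandas as pd

close = pd.DataFrame({"AAA": [1.0, 2.0], "BBB": [3.0, 4.0]})
entries = pd.DataFrame(False, index=close.index, columns=close.columns)
print(entries.dtypes.unique())  # [dtype('bool')]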
'''
Takes literature metallicities and makes new Fe/H basis
'''
import pickle
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from . import *
class LitMetallicities():
'''
Class to
1. read in Fe/H values from the literature
2. initialize data set cross-referencing functionality
'''
def __init__(self,
source_dir=config_red["data_dirs"]["DIR_LIT_HIGH_RES_FEH"]):
# stand-in that consists of our program star names
self.our_program_stars = pd.read_csv(source_dir + "our_program_stars_names_only.csv")
# Fe/H from Layden+ 1994; this may serve as the common basis for RRabs
self.layden_feh = pd.read_csv(source_dir + "layden_1994_abundances.dat")
# RES: "rather low"
# Fe/H Clementini+ 1995
self.clementini_feh = pd.read_csv(source_dir + "clementini_1995_abundances.dat")
# Fe/H Fernley+ 1996
self.fernley96_feh = pd.read_csv(source_dir + "fernley_1996_abundances.dat")
# RES: 60,000, FeI & FeII, 5900-8100 A
# Fe/H from Fernley+ 1997
self.fernley97_feh = pd.read_csv(source_dir + "fernley_1997_abundances.dat")
# RES: 60,000, two FeII lines, 5900-8100 A
# log(eps) from Lambert+ 1996
self.lambert_logeps = pd.read_csv(source_dir + "lambert_1996_abundances.dat")
# RES: ~23,000, FeII + photometric models, 3600-9000 A
# Fe/H from Wallerstein and Huang 2010, arXiv 1004.2017
self.wallerstein_feh = pd.read_csv(source_dir + "wallerstein_huang_2010_abundances.dat")
# RES: ~30,000, FeII
# Fe/H from Chadid+ 2017 ApJ 835.2:187 (FeI and II lines)
self.chadid_feh = pd.read_csv(source_dir + "chadid_2017_abundances.dat")
# RES: 38000, FeI & FeII, 3400-9900 A
# Fe/H from Liu+ 2013 Res Ast Astroph 13:1307
self.liu_feh = pd.read_csv(source_dir + "liu_2013_abundances.dat")
# RES: ~60,000, FeI (& FeII?), 5100-6400 A
# Fe/H from Nemec+ 2013
self.nemec_feh = pd.read_csv(source_dir + "nemec_2013_abundances.dat")
# RES: ~65,000 or 36,000, FeI & FeII, 5150-5200 A
# Fe/H from Solano+ 1997
self.solano_feh = pd.read_csv(source_dir + "solano_1997_abundances.dat")
# RES: 22,000 & 19,000, strong FeI lines, 4160-4390 & 4070-4490 A
# Fe/H from Pancino+ 2015 MNRAS 447:2404
self.pancino_feh =
|
pd.read_csv(source_dir + "pancino_2015_abundances.dat")
|
pandas.read_csv
|
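# A minimal sketch, assuming the abundance .dat files are whitespace-delimited
# (an assumption; the real delimiter is not shown here): read_csv with an
# explicit separator, demonstrated on an in-memory sample.
import io
import pandas as pd

sample = io.StringIO("star feh\nRRab1 -1.5\nRRab2 -0.8\n")
feh = pd.read_csv(sample, sep=r"\s+")
print(feh)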
from opentrons import robot, containers, instruments
from datetime import datetime
import numpy as np
import pandas as pd
import getch
import shutil
import os
import sys
def initialize_pipettes(p10_tipracks,p10s_tipracks,p200_tipracks,trash):
# Declare all of the pipettes
p10 = instruments.Pipette(
axis='a',
max_volume=10,
min_volume=0.5,
tip_racks=p10_tipracks,
trash_container=trash,
channels=8,
name='p10-8',
aspirate_speed=400,
dispense_speed=800
)
p10s = instruments.Pipette(
axis='a',
max_volume=10,
min_volume=0.5,
tip_racks=p10s_tipracks,
trash_container=trash,
channels=1,
name='p10-8s',
aspirate_speed=400,
dispense_speed=800
)
p200 = instruments.Pipette(
axis='b',
max_volume=200,
min_volume=20,
tip_racks=p200_tipracks,
trash_container=trash,
channels=1,
name='p200-1',
aspirate_speed=400,
dispense_speed=800
)
return p10,p10s,p200
def display_deck(robot):
df = pd.DataFrame(np.zeros((3,5)), columns=['A','B','C','D','E'], index=['3','2','1'])
df.loc[:,:] = "---"
for slot in robot.deck:
for child in slot.get_children_list():
print(slot.get_name()[0],slot.get_name()[1],child.get_name())
df.loc[slot.get_name()[1],slot.get_name()[0]] = child.get_name()
print(df)
def print_layout(locations):
# Generate an empty dataframe with the right shape
layout_table = pd.DataFrame(np.zeros((3,5)), columns=['A','B','C','D','E'], index=['3','2','1'])
layout_table.loc[:,:] = "---"
# Fill in the data frame with the locations
for obj in locations:
layout_table.loc[locations[obj][1], locations[obj][0]] = obj
# Displays the required plate map and waits to proceed
print("\n Please arrange the items in the following configuration: \n")
print(layout_table,"\n")
input("Press enter to continue")
def change_speed(robot):
robot.head_speed(5000)
def change_height(pipette,container,target,recalibrate=False):
counter = 0
z = 0
print("Change height - s-g:up h-l:down x:exit")
while True:
c = getch.getch()
if c == "s":
print("Up 20mm")
pipette.robot._driver.move(z=20,mode="relative")
z += 20
elif c == "d":
print("Up 5mm")
pipette.robot._driver.move(z=5,mode="relative")
z += 5
elif c == "f":
print("Up 0.5mm")
pipette.robot._driver.move(z=0.5,mode="relative")
z += 0.5
elif c == "g":
print("Up 0.1mm")
pipette.robot._driver.move(z=0.1,mode="relative")
z += 0.1
elif c == "h":
print("Down 0.1mm")
pipette.robot._driver.move(z=-0.1,mode="relative")
z += -0.1
elif c == "j":
print("Down 0.5mm")
pipette.robot._driver.move(z=-0.5,mode="relative")
z += -0.5
elif c == "k":
print("Down 5mm")
pipette.robot._driver.move(z=-5,mode="relative")
z += -5
elif c == "l":
print("Down 20mm")
pipette.robot._driver.move(z=-20,mode="relative")
z += -20
elif c == "x":
print("Exit")
break
counter += 1
pipette.calibrate_position((container,target.from_center(x=0, y=0, z=-1,reference=container)))
if recalibrate:
if counter > 1:
print("Will recalibrate")
redo = True
else:
print("Calibrated")
redo = False
return redo,z
else:
return z
def well_addresses():
'''Generates a list of well address A1-H12'''
letter = ["A","B","C","D","E","F","G","H"]
number = ["1","2","3","4","5","6","7","8","9","10","11","12"]
target_well = []
temp_well = 0
for n in number:
for l in letter:
temp_well = l + n
target_well.append(temp_well)
return target_well
def print_center(statement):
columns = shutil.get_terminal_size().columns
print('\n',statement.center(columns))
def request_info(statement,type='string',length=0,select_from=[]):
answer = input(statement)
if answer == '':
print("Please enter a value\n")
return request_info(statement,type=type)
elif type == 'int':
try:
int(answer)
return int(answer)
except:
print("Invalid type\n")
return request_info(statement,type=type)
elif type == 'list':
try:
nums = [int(num) for num in answer.split(' ')]
if len(nums) != length:
print('Requires {} inputs'.format(length))
return request_info(statement,type=type,length=length)
return [int(num) for num in answer.split(' ')]
except:
print("Invalid type\n")
return request_info(statement,type=type,length=length)
if select_from != []:
if answer not in select_from:
print('Not in list')
print(select_from)
return request_info(statement,type=type,select_from=select_from)
else:
return answer
return answer
def make_directory(path):
dir_name = path.split("/")[-1]
if os.path.exists(path):
print("Directory {} already exists".format(dir_name))
else:
# Generates a new directory with the ID# as its name
os.makedirs(path)
print("Making directory for {}".format(dir_name))
def check_robot():
try:
robot_name = str(os.environ["ROBOT_DEV"][-5:])
except:
sys.exit("Not connected to a robot, run roboswitch <robot_name> to change the robot")
robot_number = int(request_info("Run on this robot: {} ? 1-Yes, 2-No ".format(robot_name),type='int'))
if robot_number == 1:
print("Proceeding with run")
else:
sys.exit("Run `roboswitch <robot_name>` to change the robot")
def list_to_string(ls):
string = ''
for l in ls:
string += "'{}',".format(l)
return string[:-1]
def query_for_parts(status,enzyme,engine):
query = "SELECT parts.part_id,parts.status,fragments.fragment_name,plates.plate_id,wells.address,wells.volume,plates.id FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
INNER JOIN wells ON fragments.id = wells.fragment_id\
INNER JOIN plates on wells.plate_id = plates.id\
WHERE parts.status IN ({})\
AND parts.cloning_enzyme = '{}'".format(list_to_string(status),enzyme)
return pd.read_sql_query(query, con=engine)
def query_for_plates(parts,engine):
query = "SELECT parts.part_id,fragments.fragment_name,plates.plate_id,wells.address,wells.volume,plates.id,plates.plate_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
INNER JOIN wells ON fragments.id = wells.fragment_id\
INNER JOIN plates on wells.plate_id = plates.id\
WHERE parts.part_id IN ({})".format(list_to_string(parts))
return pd.read_sql_query(query, con=engine)
def query_for_fragments(parts,engine):
query = "SELECT parts.part_id,fragments.fragment_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
WHERE parts.part_id IN ({})".format(list_to_string(parts))
return pd.read_sql_query(query, con=engine)
def query_everything(engine):
print()
print(datetime.now(),'Began run')
query_outcomes = "SELECT parts.part_id,parts.status,wells.seq_outcome,wells.plate_type,builds.build_name,wells.misplaced FROM parts \
INNER JOIN wells ON parts.id = wells.part_id\
INNER JOIN plates ON wells.plate_id = plates.id\
INNER JOIN builds ON plates.build_id = builds.id"
query_frag = "SELECT parts.part_id,fragments.fragment_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id"
query_parts = "SELECT * FROM parts"
df_frag = pd.read_sql_query(query_frag, con=engine)
frags = df_frag.groupby('part_id')['fragment_name'].agg(len)
frags.name = 'Count'
frags = pd.DataFrame(frags).reset_index()
frags_dict = dict(zip(frags.part_id.tolist(),frags.Count.tolist()))
# subs_dict = dict(zip(df_frag.part_id.tolist(),df_frag.sub_name.tolist()))
print(datetime.now(),'Finished analyzing fragments')
def multiple(x):
if len(x) == 1:
x.append('N/A')
return x
def find_outcome(x):
if x in df_out_dict.keys():
return df_out_dict[x]
else:
return ['N/A','N/A']
def find_build(x):
if x in df_build_dict.keys():
return df_build_dict[x]
else:
return ['N/A','N/A']
def simplify_outcome(x):
if "mutation" in x:
return 'cloning_mutation'
elif "bad" in x:
return 'sequence_failure'
else:
return x
df_res = pd.read_sql_query(query_outcomes, con=engine)
df_res = df_res[df_res.plate_type == 'seq_plate']
df_out = df_res.groupby('part_id')['seq_outcome'].apply(list)
df_out.name = 'Outcomes'
df_out = pd.DataFrame(df_out).reset_index()
df_out.Outcomes = df_out.Outcomes.apply(multiple)
df_out_dict = dict(zip(df_out.part_id.tolist(),df_out.Outcomes.tolist()))
df_build = df_res.groupby('part_id')['build_name'].apply(list)
df_build.name = 'Builds'
df_build = pd.DataFrame(df_build).reset_index()
df_build.Builds = df_build.Builds.apply(multiple)
df_build_dict = dict(zip(df_build.part_id.tolist(),df_build.Builds.tolist()))
print(datetime.now(),'Finished analyzing outcomes')
df_parts =
|
pd.read_sql_query(query_parts, con=engine)
|
pandas.read_sql_query
|
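# A minimal sketch of a read_sql_query round trip with an in-memory SQLite
# engine; the table and column values are illustrative, not the real schema.
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")
pd.DataFrame({"part_id": ["p1", "p2"], "status": ["sequence_confirmed", "cloning"]}).to_sql(
    "parts", engine, index=False)
df = pd.read_sql_query("SELECT part_id, status FROM parts WHERE status = 'cloning'", con=engine)
print(df)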
"""
coding: utf-8
@author: <NAME>, modified by <NAME>
Inspired by:
https://github.com/hmelberg/stats-to-pandas/blob/master/stats_to_pandas/__init__.py
https://github.com/eurostat/prophet
"""
from __future__ import print_function
import pandas as pd
import requests
import ast
from pyjstat import pyjstat
from collections import OrderedDict
from ipywidgets import widgets
from IPython.display import display
# todo: consider using jsonstat instead of pyjstat
class API_to_data:
def __init__(self, language='en', base_url='http://data.ssb.no/api/v0'):
"""
Parameters:
-----------
language: string
default in Statistics Norway: 'en' (Search for English words)
optional in Statistics Norway: 'no' (Search for Norwegian words)
url: string
default in Statistics Norway: 'http://data.ssb.no/api/v0'
different defaults can be specified
"""
self.language = language
self.burl = base_url
self.furl = None
self.variables = None
self.time = None
def search(self, phrase):
"""
Search for tables that contain the phrase in Statistics Norway.
Returns a pandas dataframe with the results.
Not case sensitive.
Language sensitive (specified in the language option)
Example
-------
df = search("income")
Parameters
----------
phrase: string
The phrase can contain several words (space separated):
search("export Norwegian parrot")
It also supports truncation:
search("pharma*")
"""
# todo: make converter part of the default specification only for statistics norway
convert = {'æ' : '%C3%A6', 'Æ' : '%C3%86', 'ø' : '%C3%B8', 'Ø' : '%C3%98', 'å' : '%C3%A5', 'Å' : '%C3%85',
'"' : '%22', '(' : '%28', ')' : '%29', ' ' : '%20'}
search_str = '{base_url}/{language}/table/?query={phrase}'.format(base_url=self.burl, language=self.language, phrase=phrase)
for k, v in convert.items():
search_str = search_str.replace(k, v)
df =
|
pd.read_json(search_str)
|
pandas.read_json
|
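# A minimal sketch of read_json on an in-memory payload standing in for the
# search response (the real response shape is an assumption here).
import io
import pandas as pd

payload = io.StringIO('[{"id": "12345", "title": "Income statistics"}]')
df = pd.read_json(payload)
print(df)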
import pandas as pd
import numpy as np
from zipfile import ZipFile
from cleanco import cleanco
import re
import os
import zipcode
from pyzipcode import ZipCodeDatabase
#State formats
state_format_extrastuff = '^[a-zA-Z]{2} '
state_format_right = '^[A-Z]{2}$'
#Set up standardization for date fields
date_right_pattern = '^[0-9]{4}-[0-9]{2}-[0-9]{2}$'
date_long_format = '^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
date_long_format_no_space = '^[0-9]{4}-[0-9]{2}-[0-9]{2}[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
date_slash_format = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}$'
date_slash_format_5='^[0-9]{1,2}/[0-9]{1,2}/[0-9]{5}$'
date_slash_format_2 = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{2}$'
date_slash_format_long = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
#Set up standardization for zipcodes
right_zip = '^[0-9]{5}$'
long_zip = '^[0-9]{5}-[0-9]{4}$'
zip_dict = {'77098':'TX','49855':'MI', '48075':'MI','48334':'MI', '48034':'MI', '48335':'MI','95014':'CA','92833':'CA','92834':'CA','10962':'NY','98117':'WA','98765':'WA','20008':'DC','20002':'DC','21704':'MD','20814':'MD','21208':'MD','22222':'VA'}
state_values = ['TX', 'MA', 'MI', 'CA', 'VA', 'NJ', 'NY', 'PA', 'FL', 'MN', 'IL',
'MD', 'CT', 'WA', 'IA', 'CO', 'AZ', 'GA', 'OK', 'LA', 'WI', 'ND',
'UT', 'IN', 'OH', 'KY', 'NC', 'NH', 'MO', 'TN', 'ID', 'VT', 'DC',
'SD', 'AL', 'OR', 'AR', 'NM', 'SC', 'NE', 'DE', 'WY', 'HI', 'KS',
'WV', 'ME', 'RI', 'NV', 'MS', 'AK','MT','PR','GU','VI']
def get_file_from_zip(zipname, filename):
with ZipFile('/'.join(['raw_data', zipname])) as zip_file:
with zip_file.open(filename) as file:
return pd.read_csv(file, dtype=str, encoding='Latin-1')
def count_wrong_formats(df):
has_submitted = "CASE_SUBMITTED" in df.columns
has_decision = "DECISION_DATE" in df.columns
wrong_submitted_date_format = 0
wrong_decision_date_format=0
wrong_employer_state_format = 0
wrong_worksite_state_format = 0
wrong_zip_format = 0
for index, row in df.iterrows():
if has_submitted and (pd.isnull(row['CASE_SUBMITTED']) or not re.match(date_right_pattern,row['CASE_SUBMITTED'])):
wrong_submitted_date_format+=1
if has_decision and (pd.isnull(row['DECISION_DATE']) or not re.match(date_right_pattern,row['DECISION_DATE'])):
wrong_decision_date_format+=1
if row['EMPLOYER_STATE'] not in state_values:
wrong_employer_state_format+=1
if row['WORKSITE_STATE'] not in state_values:
wrong_worksite_state_format+=1
if pd.isnull(row['EMPLOYER_POSTAL_CODE']) or not re.match(right_zip, row['EMPLOYER_POSTAL_CODE']):
wrong_zip_format+=1
if has_submitted:
print(wrong_submitted_date_format,"bad CASE_SUBMITTED fields")
if has_decision:
print(wrong_decision_date_format,"bad DECISION_DATE fields")
print(wrong_employer_state_format, "bad EMPLOYER_STATE fields")
print(wrong_worksite_state_format, "bad WORKSITE_STATE fields")
print(wrong_zip_format, "bad EMPLOYER_POSTAL_CODE fields")
def address_concatenate(data):
data["EMPLOYER_ADDRESS"] = (data["EMPLOYER_ADDRESS1"].map(str) +" "+ data["EMPLOYER_ADDRESS2"].map(str)).str.replace('nan','').str.upper().str.strip()
#Set up cleaning functions
def employer_name_uppercase_cleanco(x):
if pd.isnull(x):
return x
else:
return cleanco(str(x).upper()).clean_name()
def uppercase_nopunct(x):
if pd.isnull(x):
return x
else:
return str(x).upper().replace('[^\w\s]','').replace('/',' ').replace('"','').replace(' ',' ').strip()
def clean_states(x):
if pd.isnull(x):
return x
if str(x).strip()=='':
return np.nan
elif re.match(state_format_right, x):
return x
elif re.match(state_format_extrastuff, x) and x[:2] in state_values:
return x[:2]
elif x=="MARYLAND":
return "MD"
elif x=="NEW YORK":
return "NY"
else:
print("\t\tState Error, ",x)
return x
def case_status_withdrawn(x):
if x.WITHDRAWN=="Y" or x.WITHDRAWN=="y":
return x.CASE_STATUS+"-WITHDRAWN"
else:
return x.CASE_STATUS
def dot_code_format(x):
if pd.isnull(x):
return x
else:
return str(x).zfill(3)
def check_apply_date_pattern(x):
if pd.isnull(x):
return x
else:
x=str(x).replace(" ",'')
if re.match(date_right_pattern,x):
return x
elif re.match(date_long_format, x):
return x[:10].strip()
elif re.match(date_long_format_no_space,x):
return x[:10].strip()
elif re.match(date_slash_format, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_5, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:x.index('/',x.index('/')+1)+5]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_2, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:]
return "20{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_long, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:x.index('/',x.index('/')+1)+5]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
else:
print("\t\tDATE ERROR: x is",x,"returning None")
return None
def fix_zip(x):
if pd.isnull(x):
return x
x=str(x).strip()
if x.isnumeric():
x=x.zfill(5)
if re.match(right_zip,x):
return x
elif re.match(long_zip,x):
return x[:5]
else:
print("\t\tError in zip,",x)
return x
def fix_visa_class(x):
if pd.isnull(x):
return x
x=str(x).strip()
valid = ["H-1B","E-3","H-1B1 CHILE","H-1B1 SINGAPORE"]
if x in valid:
return x
elif x=="R":
return "H-1B"
elif x=="A":
return "E-3"
elif x=="C":
return "H-1B1 CHILE"
elif x=="S":
return "H-1B1 SINGAPORE"
else:
print("\t\tError in visa class, ",x)
return x
def fix_employer_states(x):
if pd.isnull(x['EMPLOYER_STATE']):
return x.EMPLOYER_STATE
elif x['EMPLOYER_STATE'] not in state_values and x.EMPLOYER_POSTAL_CODE.isdigit():
if x.EMPLOYER_POSTAL_CODE in zip_dict.keys():
return zip_dict[x.EMPLOYER_POSTAL_CODE]
else:
newzip = zipcode.isequal(x.EMPLOYER_POSTAL_CODE)
if newzip is not None:
return newzip.state
else:
pyzip = findpyzipcode(x)
if pyzip:
return pyzip
print("\t\tCouldnt find",x.EMPLOYER_POSTAL_CODE,"in either zip package")
return x.EMPLOYER_STATE
elif x['EMPLOYER_STATE'] not in state_values and re.match(state_format_right, x.EMPLOYER_POSTAL_CODE.upper()):
#print("Employer state found in postal code, shifting",x.EMPLOYER_POSTAL_CODE.upper())
x.EMPLOYER_CITY = x.EMPLOYER_STATE
return x.EMPLOYER_POSTAL_CODE.upper()
else:
return x.EMPLOYER_STATE
def findpyzipcode(x):
zcdb = ZipCodeDatabase()
try:
value = zcdb[x]
return value.state
except IndexError:
return None
def fix_worksite_states(x):
if pd.isnull(x['WORKSITE_STATE']):
return x.WORKSITE_STATE
elif x['WORKSITE_STATE'] not in state_values and x['EMPLOYER_STATE'] in state_values and x.WORKSITE_CITY == x.EMPLOYER_CITY:
#print("Cities match, returning",x.EMPLOYER_STATE,"to fix",x.WORKSITE_STATE)
return x.EMPLOYER_STATE
else:
return x.WORKSITE_STATE
def check_states(x):
if x.CASE_STATUS=="CERTIFIED":
if pd.isnull(x.EMPLOYER_STATE):
print("Null Employer State: {}, Status - {}".format(x.EMPLOYER_NAME, x.CASE_STATUS))
elif x['EMPLOYER_STATE'] not in state_values:
print("Wrong Employer State: Name - {}, City - {}, State - {}, Zip - {}, Status - {}".format(x.EMPLOYER_NAME, x.EMPLOYER_CITY, x.EMPLOYER_STATE, x.EMPLOYER_POSTAL_CODE, x.CASE_STATUS))
if
|
pd.isnull(x['WORKSITE_STATE'])
|
pandas.isnull
|
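# A minimal sketch of a vectorized counterpart to fix_zip using pandas string
# methods on toy values (the row-wise function above remains the reference cleaner).
import pandas as pd

zips = pd.Series(["2138", "77098-1234", None])
clean = zips.dropna().astype(str).str.strip().str.split("-").str[0].str.zfill(5)
print(clean.tolist())  # ['02138', '77098']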
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 21:45:02 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
from mayavi import mlab
from tvtk.api import tvtk # python wrappers for the C++ vtk ecosystem
import numpy as np
import matplotlib.pyplot as plt # only for manipulating the input image
import glob,os, pickle
label_mapping={
0:"pole",
1:"slight",
2:"bboard",
3:"tlight",
4:"car",
5:"truck",
6:"bicycle",
7:"motor",
8:"bus",
9:"tsignf",
10:"tsignb",
11:"road",
12:"sidewalk",
13:"curbcut",
14:"crosspln",
15:"bikelane",
16:"curb",
17:"fence",
18:"wall",
19:"building",
20:"person",
21:"rider",
22:"sky",
23:"vege",
24:"terrain",
25:"markings",
26:"crosszeb",
27:"Nan",
}
label_color={
0:(117,115,102), #"pole",
1:(212,209,156),#"slight",
2:(224,9,9),#"bboard",
3:(227,195,66),#"tlight",
4:(137,147,169),#"car",
5:(53,67,98),#"truck",
6:(185,181,51),#"bicycle",
7:(238,108,91),#"motor",
8:(247,5,5),#"bus",
9:(127,154,82),#"tsignf",
10:(193,209,167),#"tsignb",
11:(82,83,76),#"road",
12:(141,142,133),#"sidewalk",
13:(208,212,188),#"curbcut",
14:(98,133,145),#"crosspln",
15:(194,183,61),#"bikelane",
16:(141,139,115),#"curb",
17:(157,186,133),#"fence",
18:(114,92,127),#"wall",
19:(78,61,76),#"building",
20:(100,56,67),#"person",
21:(240,116,148),#"rider",
22:(32,181,191),#"sky",
23:(55,204,26),#"vege",
24:(84,97,82),#"terrain",
25:(231,24,126),#"markings",
26:(141,173,166),#"crosszeb",
27:(0,0,0),#"Nan",
}
def auto_sphere(image_file):
# create a figure window (and scene)
fig = mlab.figure(size=(600, 600))
# load and map the texture
img = tvtk.JPEGReader()
img.file_name = image_file
texture = tvtk.Texture(input_connection=img.output_port, interpolate=1)
# print(texture)
# (interpolate for a less raster appearance when zoomed in)
# use a TexturedSphereSource, a.k.a. getting our hands dirty
R = 1
Nrad = 180
# create the sphere source with a given radius and angular resolution
sphere = tvtk.TexturedSphereSource(radius=R, theta_resolution=Nrad,
phi_resolution=Nrad)
# print(sphere)
# assemble rest of the pipeline, assign texture
sphere_mapper = tvtk.PolyDataMapper(input_connection=sphere.output_port)
sphere_actor = tvtk.Actor(mapper=sphere_mapper, texture=texture)
fig.scene.add_actor(sphere_actor)
mlab.show()
def manual_sphere(image_file):
# caveat 1: flip the input image along its first axis
img = plt.imread(image_file) # shape (N,M,3), flip along first dim
outfile = image_file.replace('.jfif', '_flipped.jpg')
# flip output along first dim to get right chirality of the mapping
img = img[::-1,...]
plt.imsave(outfile, img)
image_file = outfile # work with the flipped file from now on
# parameters for the sphere
R = 1 # radius of the sphere
Nrad = 180 # points along theta and phi
phi = np.linspace(0, 2 * np.pi, Nrad) # shape (Nrad,)
theta = np.linspace(0, np.pi, Nrad) # shape (Nrad,)
phigrid,thetagrid = np.meshgrid(phi, theta) # shapes (Nrad, Nrad)
# compute actual points on the sphere
x = R * np.sin(thetagrid) * np.cos(phigrid)
y = R * np.sin(thetagrid) * np.sin(phigrid)
z = R * np.cos(thetagrid)
# create figure
mlab.figure(size=(600, 600))
# create meshed sphere
mesh = mlab.mesh(x,y,z)
mesh.actor.actor.mapper.scalar_visibility = False
mesh.actor.enable_texture = True # probably redundant assigning the texture later
# load the (flipped) image for texturing
img = tvtk.JPEGReader(file_name=image_file)
texture = tvtk.Texture(input_connection=img.output_port, interpolate=0, repeat=0)
# print(texture)
mesh.actor.actor.texture = texture
# tell mayavi that the mapping from points to pixels happens via a sphere
mesh.actor.tcoord_generator_mode = 'sphere' # map is already given for a spherical mapping
cylinder_mapper = mesh.actor.tcoord_generator
# caveat 2: if prevent_seam is 1 (default), half the image is used to map half the sphere
cylinder_mapper.prevent_seam = 0 # use 360 degrees, might cause seam but no fake data
#cylinder_mapper.center = np.array([0,0,0]) # set non-trivial center for the mapping sphere if necessary
def mpl_sphere(image_file):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
img = plt.imread(image_file)
# define a grid matching the map size, subsample along with pixels
theta = np.linspace(0, np.pi, img.shape[0])
phi = np.linspace(0, 2*np.pi, img.shape[1])
print(img.shape)
print(theta.shape)
print(phi.shape)
#'''
count = 180  # keep 180 points along theta and phi
theta_inds = np.linspace(0, img.shape[0] - 1, count).round().astype(int)
phi_inds = np.linspace(0, img.shape[1] - 1, count).round().astype(int)
# print(theta_inds)
theta = theta[theta_inds]
phi = phi[phi_inds]
print(theta.shape)
print(phi.shape)
img = img[np.ix_(theta_inds, phi_inds)]
print("_"*50)
print(img.shape)
#'''
theta,phi = np.meshgrid(theta, phi)
print(theta.shape,phi.shape)
R = 1
# sphere
x = R * np.sin(theta) * np.cos(phi)
y = R * np.sin(theta) * np.sin(phi)
z = R * np.cos(theta)
# create 3d Axes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x.T, y.T, z.T, facecolors=img/255, cstride=1, rstride=1) # we've already pruned ourselves
# make the plot more spherical
ax.axis('scaled')
plt.show()
def spherical_segs_pts_show(label_seg_fn,label_color):
from tqdm import tqdm
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
fig=mlab.figure(size=(600, 600))
print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
print('\nseg shape={}'.format(label_seg.shape))
# define a grid matching the map size, subsample along with pixels
theta=np.linspace(0, np.pi, label_seg.shape[0])
phi=np.linspace(0, 2*np.pi, label_seg.shape[1])
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta,phi=np.meshgrid(theta, phi)
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3))
print("\nlabel_seg_color shape={}".format(label_seg_color.shape))
R=10
# sphere
x=R * np.sin(theta) * np.cos(phi)
y=R * np.sin(theta) * np.sin(phi)
z=R * np.cos(theta)
print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape))
mask=label_seg==22
# print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255)))
mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1
# mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),)
theta_phi=np.dstack((theta,phi))
mlab.show()
def spherical_segs_object_changing(label_seg_path,label_color):
from tqdm import tqdm
import glob,os
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
# fig=mlab.figure(size=(600, 600))
label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl'))
# print(label_seg_fns)
for label_seg_fn in tqdm(label_seg_fns):
print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
print('\nseg shape={}'.format(label_seg.shape))
# define a grid matching the map size, subsample along with pixels
theta=np.linspace(0, np.pi, label_seg.shape[0])
phi=np.linspace(0, 2*np.pi, label_seg.shape[1])
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
theta,phi=np.meshgrid(theta, phi)
print("theta shape={};phi shape={}".format(theta.shape,phi.shape))
label_seg_color=np.array([label_color[v] for v in label_seg.flatten()]).reshape((label_seg.shape[0],label_seg.shape[1],3))
print("\nlabel_seg_color shape={}".format(label_seg_color.shape))
R=10
# sphere
x=R * np.sin(theta) * np.cos(phi)
y=R * np.sin(theta) * np.sin(phi)
z=R * np.cos(theta)
print("x,y,z shape={},{},{}".format(x.shape,y.shape,z.shape))
mask=label_seg==22
# print(len(np.extract(mask,x.T)),len(np.extract(mask,y.T)),len(np.extract(mask,z.T)),len(np.extract(mask,label_seg_color[:,:,0]/255)))
# mlab.points3d(x.T, y.T, z.T, label_seg_color[:,:,0]/255,) #opacity=0.75,scale_factor=0.1
# mlab.show()
# mlab.points3d(np.extract(mask,x.T),np.extract(mask,y.T),np.extract(mask,z.T),)
theta_phi=np.dstack((theta,phi))
break
def fns_sort(fns_list):
from pathlib import Path
fns_dict={int(Path(p).stem.split('_')[-1]):p for p in fns_list}
fns_dict_key=list(fns_dict.keys())
fns_dict_key.sort()
fns_dict_sorted=[fns_dict[k] for k in fns_dict_key]
return fns_dict_sorted
def panorama_object_change(label_seg_path,label_color):
from tqdm import tqdm
import glob,os
import pickle
import numpy as np
from skimage.io._plugins.pil_plugin import ndarray_to_pil, pil_to_ndarray
from PIL import Image,ImageOps
from pathlib import Path
import pandas as pd
from sklearn import preprocessing
label_seg_fns=glob.glob(os.path.join(label_seg_path,'*.pkl'))
label_seg_fns_sorted=fns_sort(label_seg_fns)
pixels={}
# i=0
for label_seg_fn in tqdm(label_seg_fns_sorted):
# print(label_seg_fn)
with open(label_seg_fn,'rb') as f:
label_seg=pickle.load(f).numpy()
# print('\nseg shape={}'.format(label_seg.shape))
fn_stem=Path(label_seg_fn).stem
fn_key,fn_idx=fn_stem.split("_")
pixels[fn_stem]=label_seg.flatten()
# if i==10:break
# i+=1
img_pixels_df=
|
pd.DataFrame.from_dict(pixels,orient='index')
|
pandas.DataFrame.from_dict
|
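# A minimal sketch of one use for the per-panorama pixel frame built above:
# per-image class fractions via value_counts (tiny fake labels, illustrative).
import pandas as pd

pixels = {"img_1": [22, 22, 23, 11], "img_2": [11, 11, 22, 23]}
img_pixels_df = pd.DataFrame.from_dict(pixels, orient="index")
fractions = img_pixels_df.apply(lambda row: row.value_counts(normalize=True), axis=1)
print(fractions.fillna(0))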
#!/usr/bin/env python
import asyncio
import logging
import time
from base64 import b64decode
from typing import Optional, List, Dict, AsyncIterable, Any
from zlib import decompress, MAX_WBITS
import aiohttp
import pandas as pd
import signalr_aio
import ujson
from signalr_aio import Connection
from signalr_aio.hubs import Hub
from async_timeout import timeout
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.data_type.order_book_tracker_entry import OrderBookTrackerEntry, BittrexOrderBookTrackerEntry
from hummingbot.core.utils import async_ttl_cache
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.logger import HummingbotLogger
from hummingbot.market.bittrex.bittrex_active_order_tracker import BittrexActiveOrderTracker
from hummingbot.market.bittrex.bittrex_order_book import BittrexOrderBook
EXCHANGE_NAME = "Bittrex"
BITTREX_REST_URL = "https://api.bittrex.com/v3"
BITTREX_EXCHANGE_INFO_PATH = "/markets"
BITTREX_MARKET_SUMMARY_PATH = "/markets/summaries"
BITTREX_TICKER_PATH = "/markets/tickers"
BITTREX_WS_FEED = "https://socket.bittrex.com/signalr"
MAX_RETRIES = 20
MESSAGE_TIMEOUT = 30.0
SNAPSHOT_TIMEOUT = 10.0
NaN = float("nan")
class BittrexAPIOrderBookDataSource(OrderBookTrackerDataSource):
PING_TIMEOUT = 10.0
_bittrexaobds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._bittrexaobds_logger is None:
cls._bittrexaobds_logger = logging.getLogger(__name__)
return cls._bittrexaobds_logger
def __init__(self, symbols: Optional[List[str]] = None):
super().__init__()
self._symbols: Optional[List[str]] = symbols
self._websocket_connection: Optional[Connection] = None
self._websocket_hub: Optional[Hub] = None
self._snapshot_msg: Dict[str, any] = {}
@classmethod
@async_ttl_cache(ttl=60 * 30, maxsize=1)
async def get_active_exchange_markets(cls) -> pd.DataFrame:
"""
Returned data frame should have symbol as index and include USDVolume, baseAsset and quoteAsset
"""
market_path_url = f"{BITTREX_REST_URL}{BITTREX_EXCHANGE_INFO_PATH}"
summary_path_url = f"{BITTREX_REST_URL}{BITTREX_MARKET_SUMMARY_PATH}"
ticker_path_url = f"{BITTREX_REST_URL}{BITTREX_TICKER_PATH}"
async with aiohttp.ClientSession() as client:
market_response, ticker_response, summary_response = await safe_gather(
client.get(market_path_url), client.get(ticker_path_url), client.get(summary_path_url)
)
market_response: aiohttp.ClientResponse = market_response
ticker_response: aiohttp.ClientResponse = ticker_response
summary_response: aiohttp.ClientResponse = summary_response
if market_response.status != 200:
raise IOError(
f"Error fetching active Bittrex markets information. " f"HTTP status is {market_response.status}."
)
if ticker_response.status != 200:
raise IOError(
f"Error fetching active Bittrex market tickers. " f"HTTP status is {ticker_response.status}."
)
if summary_response.status != 200:
raise IOError(
f"Error fetching active Bittrex market summaries. " f"HTTP status is {summary_response.status}."
)
market_data, ticker_data, summary_data = await safe_gather(
market_response.json(), ticker_response.json(), summary_response.json()
)
ticker_data: Dict[str, Any] = {item["symbol"]: item for item in ticker_data}
summary_data: Dict[str, Any] = {item["symbol"]: item for item in summary_data}
market_data: List[Dict[str, Any]] = [
{**item, **ticker_data[item["symbol"]], **summary_data[item["symbol"]]}
for item in market_data
if item["symbol"] in ticker_data and item["symbol"] in summary_data
]
all_markets: pd.DataFrame =
|
pd.DataFrame.from_records(data=market_data, index="symbol")
|
pandas.DataFrame.from_records
|
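# A minimal sketch of from_records with an index column, mirroring the market
# table construction above on fake records (field names are illustrative).
import pandas as pd

records = [{"symbol": "BTC-USD", "quoteVolume": 1000.0},
           {"symbol": "ETH-USD", "quoteVolume": 500.0}]
markets = pd.DataFrame.from_records(records, index="symbol")
print(markets.sort_values("quoteVolume", ascending=False).head())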
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames and zip them together, under the
assumption that they all share the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return
|
pd.concat(zipped, axis=1)
|
pandas.concat
|
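# Hedged sketch (not part of the test suite above): what zip_frames builds with
# pd.concat -- column 'a' from every frame, then column 'b', and so on.
# df1/df2 are illustrative stand-ins.
import pandas as pd
df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
df2 = pd.DataFrame({'a': [10, 20], 'b': [30, 40]})
zipped = [f.loc[:, c] for c in df1.columns for f in (df1, df2)]
print(pd.concat(zipped, axis=1))
# columns come out as a, a, b, b with the original row index preserved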
#!/usr/bin/env python
import pandas as pd
import argparse
import datetime
import time
import sys
import investpy
scrap_delay = 2
def main():
parser = argparse.ArgumentParser(description='scrap investing.com daily close')
parser.add_argument('-input_file', type=str, default='data_tickers/investing_stock_info.csv', help='input file')
parser.add_argument('-output_prefix', type=str, default='../stock_data/raw_daily_investing_stock/investing_stock_', help='prefix of the output file')
parser.add_argument('-date', type=str, help='Specify the date')
args = parser.parse_args()
if args.date is None:
scrap_date = datetime.date.today()
args.date = str(scrap_date)
filename = args.output_prefix + args.date + '.csv'
df_input =
|
pd.read_csv(args.input_file)
|
pandas.read_csv
|
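# Hedged sketch of the input step above: pd.read_csv loads the ticker list and
# the scraper then walks it row by row. The columns printed here are whatever
# the CSV provides; no particular schema is assumed.
import pandas as pd
df_input = pd.read_csv('data_tickers/investing_stock_info.csv')
print(df_input.columns.tolist())
for _, row in df_input.head(3).iterrows():
    print(row.to_dict())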
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 08:58:04 2019
@author: <NAME>
"""
####################################################################
#Federal Columbia River Power System Model developed from HYSSR
#This version operates on a daily time step.
####################################################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def simulate(sim_years):
def ismember(A,B):
x = np.in1d(A,B)
return 1 if x==True else 0
#Data input - select flows september 1 (244 julian date)
#d=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',usecols=range(3,58), header=None, names=np.arange(0,55))
d=pd.read_csv('Synthetic_streamflows/synthetic_streamflows_FCRPS.csv',header=None)
d = d.iloc[0:(sim_years+3)*365,:]
d = d.iloc[243:len(d)-122,:]
d= d.reset_index(drop=True)
[r, c]= d.shape
for i in range(0,r):
for j in range(0,c):
if np.isnan(d.iloc[i,j]) == True:
d.iloc[i,j] = 0
no_days = int(len(d))
no_years = int(no_days/365)
calender=pd.read_excel('PNW_hydro/FCRPS/daily_streamflows.xlsx','Calender',header=None)
c=np.zeros((no_days,4))
for i in range(0,no_years):
c[i*365:i*365+365,0] = calender.iloc[:,0]
c[i*365:i*365+365,1] = calender.iloc[:,1]
c[i*365:i*365+365,2] = calender.iloc[:,2]
c[i*365:i*365+365,3] = calender.iloc[:,3]+i
#month, day, year of simulation data
months = c[:,0]
days = c[:,1]
julians = c[:,2]
no_days = len(c)
years = c[:,3]
#Simulated Runoff ("local" flow accretions defined by USACE and BPA)
local = d
#%Project indices (consistent with HYSSR)
#% No. Name HYSSR ID
#% 0 MICA 1
#% 1 ARROW 2
#% 2 LIBBY 3
#% 3 DUNCAN 5
#% 4 <NAME> 6
#% 5 HUNGRY HORSE 10
#% 6 KERR 11
#% 7 <NAME> 16
#% 8 POST FALLS 18
#% 9 <NAME> 19
#% 10 CHELAN 20
#% 11 BROWNLEE 21
#% 12 DWORSHAK 31
#% 13 NOXON 38
#% 14 ROUND BUTTE 40
#% 15 REVELSTOKE 41
#% 16 SEVEN MILE 46
#% 17 BRILLIANT 50
#% 18 <NAME> 54
#% 19 CABINET GRGE 56
#% 20 BOX CANYON 57
#% 21 BOUNDARY 58
#% 22 WANETA 59
#% 23 UPPER FALLS 61
#% 24 MONROE ST 62
#% 25 NINE MILE 63
#% 26 LONG LAKE 64
#% 27 LITTLE FALLS 65
#% 28 <NAME> 66
#% 29 WELLS 67
#% 30 ROCKY REACH 68
#% 31 ROCK ISLAND 69
#% 32 WANAPUM 70
#% 33 PRIE<NAME> 71
#% 34 OXBOW 72
#% 35 LOWER GRANITE 76
#% 36 LITTLE GOOSE 77
#% 37 LOWER MONUMENTAL 78
#% 38 <NAME> 79
#% 39 MCNARY 80
#% 40 <NAME> 81
#% 41 DALLES 82
#% 42 BONNEVILLE 83
#% 43 <NAME> 84
#% 44 PELTON 95
#% 45 <NAME> 146
#% 46 <NAME> 400
#%Simulated unregulated flows for The Dalles. These flows are used to
#%adjust rule curves for storage reservoirs. In reality, ESP FORECASTS are
#%used-- but for now, the model assumes that BPA/USACE gets the forecast
#%exactly right.
TDA_unreg = d.iloc[:,47]
############
#%Additional input to fix the model
#
#%Fix No.1 Kerr Dam lack of input from CFM
CFM5L= d.iloc[:,48]
#%add to Kerr
#
#%Fix No.2 Lower Granite lack of input from 5 sources
#%Following will be add ti LWG
ORF5H= d.iloc[:,49]
SPD5L= d.iloc[:,50]
ANA5L= d.iloc[:,51]
LIM5L= d.iloc[:,52]
WHB5H= d.iloc[:,53]
#%
#%Fix No.3 lack of input McNary
#%
YAK5H= d.iloc[:,54]
##############################################
##############################################
#%Flood control curves
MCD_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Mica_Daily',usecols='B:M',skiprows=3,header=None)
ARD_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Arrow_Daily',usecols='B:G',skiprows=3,header=None)
LIB_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Libby_Daily',usecols='B:F',skiprows=3,header=None)
DNC_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Duncan_Daily',usecols='B:F',skiprows=3,header=None)
HHO_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='HungryHorse_Daily',usecols='B:I',skiprows=3,header=None)
ALB_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Albeni_Daily',usecols='B:C',skiprows=3,header=None)
GCL_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='GrandCoulee_Daily',usecols='B:J',skiprows=3,header=None)
BRN_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Brownlee_Daily',usecols='B:U',skiprows=3,header=None)
DWR_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',sheet_name='Dworshak_Daily',usecols='B:K',skiprows=3,header=None)
#%Read Other CRCs
LIB_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='LIB_CRC',header=None)
LIB_ARC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='LIB_ARC', header=None)
#% COR_CRC_hm3 = pd.read_excel('ORC.xlsx','COR_CRC', 'A1:B365')
HHO_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='HHO_CRC', header=None)
HHO_ARC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='HHO_ARC', header=None)
#% KER_CRC_hm3 = pd.read_excel('ORC.xlsx','KER_CRC', 'A1:B365')
ALF_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='ALF_CRC', header=None)
ALF_ARC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='ALF_ARC', header=None)
GCL_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='GCL_CRC', header=None)
GCL_ARC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='GCL_ARC', header=None)
#% CHL_CRC_hm3 = pd.read_excel('ORC.xlsx','CHL_CRC', 'A1:B365')
BRN_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='BRN_CRC', header=None)
BRN_ARC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='BRN_ARC', header=None)
DWR_CRC_hm3 = pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='DWR_CRC', header=None)
DWR_ARC_hm3 =
|
pd.read_excel('PNW_hydro/FCRPS/ORC.xlsx',sheet_name='DWR_ARC', header=None)
|
pandas.read_excel
|
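# Hedged sketch of the flood-control input step above: each call pulls one sheet
# of res_specs2.xlsx with no header row, selecting Excel columns by letter and
# skipping the first three rows. Sheet/column choices mirror the MCD_fc line.
import pandas as pd
MCD_fc = pd.read_excel('PNW_hydro/FCRPS/res_specs2.xlsx',
                       sheet_name='Mica_Daily', usecols='B:M',
                       skiprows=3, header=None)
print(MCD_fc.shape)  # expected to be one row per day of year (assumption)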
import operator
from operator import methodcaller
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
pytestmark = pytest.mark.pandas
def test_table_column(t, df):
expr = t.plain_int64
result = expr.execute()
expected = df.plain_int64
tm.assert_series_equal(result, expected)
def test_literal(client):
assert client.execute(ibis.literal(1)) == 1
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df')
def test_selection(t, df):
expr = t[
((t.plain_strings == 'a') | (t.plain_int64 == 3))
& (t.dup_strings == 'd')
]
result = expr.execute()
expected = df[
((df.plain_strings == 'a') | (df.plain_int64 == 3))
& (df.dup_strings == 'd')
].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate(t, df):
expr = t.mutate(x=t.plain_int64 + 1, y=t.plain_int64 * 2)
result = expr.execute()
expected = df.assign(x=df.plain_int64 + 1, y=df.plain_int64 * 2)
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_scope_does_not_override(t, df):
col = t.plain_int64
expr = t[
[
col.name('new_col'),
col.sum()
.over(ibis.window(group_by='dup_strings'))
.name('grouped'),
]
]
result = expr.execute()
expected = pd.concat(
[
df[['plain_int64', 'dup_strings']].rename(
columns={'plain_int64': 'new_col'}
),
df.groupby('dup_strings')
.plain_int64.transform('sum')
.reset_index(drop=True)
.rename('grouped'),
],
axis=1,
)[['new_col', 'grouped']]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'where',
[
lambda t: None,
lambda t: t.dup_strings == 'd',
lambda t: (t.dup_strings == 'd') | (t.plain_int64 < 100),
],
)
@pytest.mark.parametrize(
('ibis_func', 'pandas_func'),
[
(methodcaller('abs'), np.abs),
(methodcaller('ceil'), np.ceil),
(methodcaller('exp'), np.exp),
(methodcaller('floor'), np.floor),
(methodcaller('ln'), np.log),
(methodcaller('log10'), np.log10),
(methodcaller('log', 2), lambda x: np.log(x) / np.log(2)),
(methodcaller('log2'), np.log2),
(methodcaller('round', 0), lambda x: x.round(0).astype('int64')),
(methodcaller('round', -2), methodcaller('round', -2)),
(methodcaller('round', 2), methodcaller('round', 2)),
(methodcaller('round'), lambda x: x.round().astype('int64')),
(methodcaller('sign'), np.sign),
(methodcaller('sqrt'), np.sqrt),
],
)
def test_aggregation_group_by(t, df, where, ibis_func, pandas_func):
ibis_where = where(t)
expr = t.group_by(t.dup_strings).aggregate(
avg_plain_int64=t.plain_int64.mean(where=ibis_where),
sum_plain_float64=t.plain_float64.sum(where=ibis_where),
mean_float64_positive=ibis_func(t.float64_positive).mean(
where=ibis_where
),
neg_mean_int64_with_zeros=(-t.int64_with_zeros).mean(where=ibis_where),
nunique_dup_ints=t.dup_ints.nunique(),
)
result = expr.execute()
pandas_where = where(df)
mask = slice(None) if pandas_where is None else pandas_where
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_int64': lambda x, mask=mask: x[mask].mean(),
'plain_float64': lambda x, mask=mask: x[mask].sum(),
'dup_ints': 'nunique',
'float64_positive': (
lambda x, mask=mask, func=pandas_func: func(x[mask]).mean()
),
'int64_with_zeros': lambda x, mask=mask: (-x[mask]).mean(),
}
)
.reset_index()
.rename(
columns={
'plain_int64': 'avg_plain_int64',
'plain_float64': 'sum_plain_float64',
'dup_ints': 'nunique_dup_ints',
'float64_positive': 'mean_float64_positive',
'int64_with_zeros': 'neg_mean_int64_with_zeros',
}
)
)
# TODO(phillipc): Why does pandas not return floating point values here?
expected['avg_plain_int64'] = expected.avg_plain_int64.astype('float64')
result['avg_plain_int64'] = result.avg_plain_int64.astype('float64')
expected[
'neg_mean_int64_with_zeros'
] = expected.neg_mean_int64_with_zeros.astype('float64')
result[
'neg_mean_int64_with_zeros'
] = result.neg_mean_int64_with_zeros.astype('float64')
expected['mean_float64_positive'] = expected.mean_float64_positive.astype(
'float64'
)
result['mean_float64_positive'] = result.mean_float64_positive.astype(
'float64'
)
lhs = result[expected.columns]
rhs = expected
tm.assert_frame_equal(lhs, rhs)
def test_aggregation_without_group_by(t, df):
expr = t.aggregate(
avg_plain_int64=t.plain_int64.mean(),
sum_plain_float64=t.plain_float64.sum(),
)
result = expr.execute()[['avg_plain_int64', 'sum_plain_float64']]
new_names = {
'plain_float64': 'sum_plain_float64',
'plain_int64': 'avg_plain_int64',
}
expected = (
pd.Series(
[df['plain_int64'].mean(), df['plain_float64'].sum()],
index=['plain_int64', 'plain_float64'],
)
.to_frame()
.T.rename(columns=new_names)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_with_having(t, df):
expr = (
t.group_by(t.dup_strings)
.having(t.plain_float64.sum() == 5)
.aggregate(avg_a=t.plain_int64.mean(), sum_c=t.plain_float64.sum())
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_int64': 'mean', 'plain_float64': 'sum'})
.reset_index()
.rename(columns={'plain_int64': 'avg_a', 'plain_float64': 'sum_c'})
)
expected = expected.loc[expected.sum_c == 5, ['avg_a', 'sum_c']]
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_rename_key(t, df):
expr = t.groupby(t.dup_strings.name('foo')).aggregate(
dup_string_count=t.dup_strings.count()
)
assert 'foo' in expr.schema()
result = expr.execute()
assert 'foo' in result.columns
expected = (
df.groupby('dup_strings')
.dup_strings.count()
.rename('dup_string_count')
.reset_index()
.rename(columns={'dup_strings': 'foo'})
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('reduction', ['mean', 'sum', 'count', 'std', 'var'])
@pytest.mark.parametrize(
'where',
[
lambda t: (t.plain_strings == 'a') | (t.plain_strings == 'c'),
lambda t: (t.dup_strings == 'd')
& ((t.plain_int64 == 1) | (t.plain_int64 == 3)),
lambda t: None,
],
)
def test_reduction(t, df, reduction, where):
func = getattr(t.plain_int64, reduction)
mask = where(t)
expr = func(where=mask)
result = expr.execute()
df_mask = where(df)
expected_func = getattr(
df.loc[df_mask if df_mask is not None else slice(None), 'plain_int64'],
reduction,
)
expected = expected_func()
assert result == expected
@pytest.mark.parametrize(
'reduction',
[
lambda x: x.any(),
lambda x: x.all(),
lambda x: ~(x.any()),
lambda x: ~(x.all()),
],
)
def test_boolean_aggregation(t, df, reduction):
expr = reduction(t.plain_int64 == 1)
result = expr.execute()
expected = reduction(df.plain_int64 == 1)
assert result == expected
@pytest.mark.parametrize('column', ['float64_with_zeros', 'int64_with_zeros'])
def test_null_if_zero(t, df, column):
expr = t[column].nullifzero()
result = expr.execute()
expected = df[column].replace(0, np.nan)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('left', 'right', 'expected', 'compare'),
[
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(1),
lambda df: np.nan,
np.testing.assert_array_equal, # treats NaNs as equal
id='literal_literal_equal',
),
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(2),
lambda df: 1,
np.testing.assert_equal,
id='literal_literal_not_equal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: ibis.literal('a'),
lambda df: df.dup_strings.where(df.dup_strings != 'a'),
tm.assert_series_equal,
id='series_literal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: t.dup_strings,
lambda df: df.dup_strings.where(df.dup_strings != df.dup_strings),
tm.assert_series_equal,
id='series_series',
),
pytest.param(
lambda t: ibis.literal('a'),
lambda t: t.dup_strings,
lambda df: pd.Series(
np.where(df.dup_strings == 'a', np.nan, 'a'), index=df.index
),
tm.assert_series_equal,
id='literal_series',
),
],
)
def test_nullif(t, df, left, right, expected, compare):
expr = left(t).nullif(right(t))
result = execute(expr)
compare(result, expected(df))
def test_nullif_inf():
df = pd.DataFrame({'a': [np.inf, 3.14, -np.inf, 42.0]})
con = connect({'t': df})
t = con.table('t')
expr = t.a.nullif(np.inf).nullif(-np.inf)
result = expr.execute()
expected = pd.Series([np.nan, 3.14, np.nan, 42.0], name='a')
tm.assert_series_equal(result, expected)
def test_group_concat(t, df):
expr = t.groupby(t.dup_strings).aggregate(
foo=t.plain_int64.group_concat(',')
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(lambda df: ','.join(df.plain_int64.astype(str)))
.reset_index()
.rename(columns={0: 'foo'})
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize('offset', [0, 2])
def test_frame_limit(t, df, offset):
n = 5
df_expr = t.limit(n, offset=offset)
result = df_expr.execute()
expected = df.iloc[offset : offset + n].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.xfail(
raises=AttributeError, reason='TableColumn does not implement limit'
)
@pytest.mark.parametrize('offset', [0, 2])
def test_series_limit(t, df, offset):
n = 5
s_expr = t.plain_int64.limit(n, offset=offset)
result = s_expr.execute()
tm.assert_series_equal(result, df.plain_int64.iloc[offset : offset + n])
@pytest.mark.parametrize(
('key', 'pandas_by', 'pandas_ascending'),
[
(lambda t, col: [ibis.desc(t[col])], lambda col: [col], False),
(
lambda t, col: [t[col], ibis.desc(t.plain_int64)],
lambda col: [col, 'plain_int64'],
[True, False],
),
(
lambda t, col: [ibis.desc(t.plain_int64 * 2)],
lambda col: ['plain_int64'],
False,
),
],
)
@pytest.mark.parametrize(
'column',
['plain_datetimes_naive', 'plain_datetimes_ny', 'plain_datetimes_utc'],
)
def test_sort_by(t, df, column, key, pandas_by, pandas_ascending):
expr = t.sort_by(key(t, column))
result = expr.execute()
expected = df.sort_values(
pandas_by(column), ascending=pandas_ascending
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_complex_sort_by(t, df):
expr = t.sort_by(
[ibis.desc(t.plain_int64 * t.plain_float64), t.plain_float64]
)
result = expr.execute()
expected = (
df.assign(foo=df.plain_int64 * df.plain_float64)
.sort_values(['foo', 'plain_float64'], ascending=[False, True])
.drop(['foo'], axis=1)
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_distinct(t, df):
expr = t.dup_strings.distinct()
result = expr.execute()
expected = pd.Series(df.dup_strings.unique(), name='dup_strings')
tm.assert_series_equal(result, expected)
def test_count_distinct(t, df):
expr = t.dup_strings.nunique()
result = expr.execute()
expected = df.dup_strings.nunique()
assert result == expected
def test_value_counts(t, df):
expr = t.dup_strings.value_counts()
result = expr.execute()
expected = (
df.dup_strings.value_counts()
.reset_index()
.rename(columns={'dup_strings': 'count'})
.rename(columns={'index': 'dup_strings'})
.sort_values(['dup_strings'])
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_table_count(t, df):
expr = t.count()
result = expr.execute()
expected = len(df)
assert result == expected
def test_weighted_average(t, df):
expr = t.groupby(t.dup_strings).aggregate(
avg=(t.plain_float64 * t.plain_int64).sum() / t.plain_int64.sum()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(
lambda df: (df.plain_int64 * df.plain_float64).sum()
/ df.plain_int64.sum()
)
.reset_index()
.rename(columns={0: 'avg'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_multiple_keys(t, df):
expr = t.groupby([t.dup_strings, t.dup_ints]).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
result = expr.execute()
expected = (
df.groupby(['dup_strings', 'dup_ints'])
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate_after_group_by(t, df):
gb = t.groupby(t.dup_strings).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
expr = gb.mutate(x=gb.avg_plain_float64)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
expected = expected.assign(x=expected.avg_plain_float64)
tm.assert_frame_equal(result[expected.columns], expected)
def test_groupby_with_unnamed_arithmetic(t, df):
expr = t.groupby(t.dup_strings).aggregate(
naive_variance=(
(t.plain_float64 ** 2).sum() - t.plain_float64.mean() ** 2
)
/ t.plain_float64.count()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_float64': lambda x: ((x ** 2).sum() - x.mean() ** 2)
/ x.count()
}
)
.reset_index()
.rename(columns={'plain_float64': 'naive_variance'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_isnull(t, df):
expr = t.strings_with_nulls.isnull()
result = expr.execute()
expected = df.strings_with_nulls.isnull()
tm.assert_series_equal(result, expected)
def test_notnull(t, df):
expr = t.strings_with_nulls.notnull()
result = expr.execute()
expected = df.strings_with_nulls.notnull()
|
tm.assert_series_equal(result, expected)
|
pandas.testing.assert_series_equal
|
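# Hedged sketch: pandas.testing.assert_series_equal, used throughout the ibis
# backend tests above, returns None when the two Series match (values, index,
# dtype and name) and raises AssertionError otherwise.
import pandas as pd
import pandas.testing as tm
left = pd.Series([1.0, 2.0, 3.0], name='x')
right = pd.Series([1.0, 2.0, 3.0], name='x')
tm.assert_series_equal(left, right)   # passes silently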
import numpy as np
import six.moves.cPickle as pickle
import gzip
import os
def load_mnist_data(dataset):
""" Load the dataset
Code adapted from http://deeplearning.net/tutorial/code/logistic_sgd.py
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
"""
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
from six.moves import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, dataset)
# Load the dataset
with gzip.open(dataset, 'rb') as f:
try:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
except:
train_set, valid_set, test_set = pickle.load(f)
# train_set, valid_set, test_set format: tuple(input, target)
# input is a numpy.ndarray of 2 dimensions (a matrix), np.float32
# where each row corresponds to an example. target is a
# numpy.ndarray of 1 dimension (vector), np.int64 that has the same length
# as the number of rows in the input. It should give the target
# to the example with the same index in the input.
return train_set, valid_set, test_set
def convert_to_one_hot(vals, max_val=0):
"""Helper method to convert label array to one-hot array."""
if max_val == 0:
max_val = vals.max() + 1
one_hot_vals = np.zeros((vals.size, max_val))
one_hot_vals[np.arange(vals.size), vals] = 1
return one_hot_vals
###########################################################################
# adult
###########################################################################
def maybe_download(train_data, test_data):
import pandas as pd
"""if adult data "train.csv" and "test.csv" are not in your directory,
download them.
"""
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
if not os.path.exists(train_data):
print("downloading training data...")
df_train = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
names=COLUMNS, skipinitialspace=True)
else:
df_train = pd.read_csv("train.csv")
if not os.path.exists(test_data):
print("downloading testing data...")
df_test = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
names=COLUMNS, skipinitialspace=True, skiprows=1)
else:
df_test = pd.read_csv("test.csv")
return df_train, df_test
def cross_columns(x_cols):
"""simple helper to build the crossed columns in a pandas dataframe
"""
crossed_columns = dict()
colnames = ['_'.join(x_c) for x_c in x_cols]
for cname, x_c in zip(colnames, x_cols):
crossed_columns[cname] = x_c
return crossed_columns
def val2idx(df, cols):
"""helper to index categorical columns before embeddings.
"""
val_types = dict()
for c in cols:
val_types[c] = df[c].unique()
val_to_idx = dict()
for k, v in val_types.items():
val_to_idx[k] = {o: i for i, o in enumerate(val_types[k])}
for k, v in val_to_idx.items():
df[k] = df[k].apply(lambda x: v[x])
unique_vals = dict()
for c in cols:
unique_vals[c] = df[c].nunique()
return df, unique_vals
def onehot(x):
from sklearn.preprocessing import OneHotEncoder
return np.array(OneHotEncoder().fit_transform(x).todense())
def wide(df_train, df_test, wide_cols, x_cols, target):
import pandas as pd
print('Processing wide data')
df_train['IS_TRAIN'] = 1
df_test['IS_TRAIN'] = 0
df_wide = pd.concat([df_train, df_test])
crossed_columns_d = cross_columns(x_cols)
categorical_columns = list(
df_wide.select_dtypes(include=['object']).columns)
wide_cols += list(crossed_columns_d.keys())
for k, v in crossed_columns_d.items():
df_wide[k] = df_wide[v].apply(lambda x: '-'.join(x), axis=1)
df_wide = df_wide[wide_cols + [target] + ['IS_TRAIN']]
dummy_cols = [
c for c in wide_cols if c in categorical_columns + list(crossed_columns_d.keys())]
df_wide = pd.get_dummies(df_wide, columns=[x for x in dummy_cols])
train = df_wide[df_wide.IS_TRAIN == 1].drop('IS_TRAIN', axis=1)
test = df_wide[df_wide.IS_TRAIN == 0].drop('IS_TRAIN', axis=1)
assert all(train.columns == test.columns)
cols = [c for c in train.columns if c != target]
X_train = train[cols].values
y_train = train[target].values.reshape(-1, 1)
X_test = test[cols].values
y_test = test[target].values.reshape(-1, 1)
return X_train, y_train, X_test, y_test
def load_adult_data(return_val=True):
import pandas as pd
df_train, df_test = maybe_download("train.csv", "test.csv")
df_train['income_label'] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test['income_label'] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
age_groups = [0, 25, 65, 90]
age_labels = range(len(age_groups) - 1)
df_train['age_group'] = pd.cut(
df_train['age'], age_groups, labels=age_labels)
df_test['age_group'] = pd.cut(
df_test['age'], age_groups, labels=age_labels)
# columns for wide model
wide_cols = ['workclass', 'education', 'marital_status', 'occupation',
'relationship', 'race', 'gender', 'native_country', 'age_group']
x_cols = (['education', 'occupation'], ['native_country', 'occupation'])
# columns for deep model
embedding_cols = ['workclass', 'education', 'marital_status', 'occupation',
'relationship', 'race', 'gender', 'native_country']
cont_cols = ['age', 'capital_gain', 'capital_loss', 'hours_per_week']
target = 'income_label'
x_train_wide, y_train_wide, x_test_wide, y_test_wide = wide(
df_train, df_test, wide_cols, x_cols, target)
x_train_wide = np.array(x_train_wide).astype(np.float32)
x_test_wide = np.array(x_test_wide).astype(np.float32)
print('Processing deep data')
df_train['IS_TRAIN'] = 1
df_test['IS_TRAIN'] = 0
df_deep =
|
pd.concat([df_train, df_test])
|
pandas.concat
|
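# Hedged sketch of the stacking pattern used in wide() and just above: tag each
# frame, pd.concat them so shared preprocessing sees both, then split back on
# the flag. Toy data only.
import pandas as pd
df_train = pd.DataFrame({'age': [25, 40], 'IS_TRAIN': [1, 1]})
df_test = pd.DataFrame({'age': [33], 'IS_TRAIN': [0]})
df_all = pd.concat([df_train, df_test])
train = df_all[df_all.IS_TRAIN == 1].drop('IS_TRAIN', axis=1)
test = df_all[df_all.IS_TRAIN == 0].drop('IS_TRAIN', axis=1)
print(len(train), len(test))  # 2 1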
import os
from scipy.io import loadmat
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from datasets.SequenceDatasets import dataset
from datasets.sequence_aug import *
from tqdm import tqdm
from itertools import islice
#Digital data was collected at 12,000 samples per second
signal_size = 1024
work_condition=['_20_0.csv','_30_2.csv']
dataname= {0:[os.path.join('bearingset','health'+work_condition[0]),
os.path.join('gearset','Health'+work_condition[0]),
os.path.join('bearingset','ball'+work_condition[0]),
os.path.join('bearingset','outer'+work_condition[0]),
os.path.join('bearingset', 'inner' + work_condition[0]),
os.path.join('bearingset', 'comb' + work_condition[0]),
os.path.join('gearset', 'Chipped' + work_condition[0]),
os.path.join('gearset', 'Miss' + work_condition[0]),
os.path.join('gearset', 'Surface' + work_condition[0]),
os.path.join('gearset', 'Root' + work_condition[0]),
],
1:[os.path.join('bearingset','health'+work_condition[1]),
os.path.join('gearset','Health'+work_condition[1]),
os.path.join('bearingset','ball'+work_condition[1]),
os.path.join('bearingset','outer'+work_condition[1]),
os.path.join('bearingset', 'inner' + work_condition[1]),
os.path.join('bearingset', 'comb' + work_condition[1]),
os.path.join('gearset', 'Chipped' + work_condition[1]),
os.path.join('gearset', 'Miss' + work_condition[1]),
os.path.join('gearset', 'Surface' + work_condition[1]),
os.path.join('gearset', 'Root' + work_condition[1]),
]
}
label = [i for i in range(0, 9)]
def get_files(root, N):
'''
This function is used to generate the final training set and test set.
root: the location of the data set
N: list of working-condition keys (0 or 1) selecting which file group to load
'''
data = []
lab =[]
for k in range(len(N)):
for n in tqdm(range(len(dataname[N[k]]))):
path1 = os.path.join(root, dataname[N[k]][n])
if n==0:
data1, lab1 = data_load(path1, label=label[n])
else:
data1, lab1 = data_load(path1, label=label[n-1])
data += data1
lab +=lab1
return [data, lab]
def data_load(filename, label):
'''
This function is mainly used to generate test data and training data.
filename:Data location
label: integer class label assigned to every window segmented from this file
'''
#--------------------
f = open(filename, "r", encoding='gb18030', errors='ignore')
fl = []
if "ball_20_0.csv" in filename:
for line in islice(f, 16, None): # Skip the first 16 lines
line = line.rstrip()
word = line.split(",", 8) # Separated by commas
fl.append(eval(word[1])) # Take a vibration signal in the x direction as input
else:
for line in islice(f, 16, None): # Skip the first 16 lines
line = line.rstrip()
word = line.split("\t", 8) # Separated by \t
fl.append(eval(word[1])) # Take a vibration signal in the x direction as input
#--------------------
fl = np.array(fl)
fl = fl.reshape(-1, 1)
# print(fl.shape())
data = []
lab = []
start, end = int(fl.shape[0]/2), int(fl.shape[0]/2)+signal_size
while end <= (int(fl.shape[0]/2)+int(fl.shape[0]/3)):
data.append(fl[start:end])
lab.append(label)
start += signal_size
end += signal_size
return data, lab
#--------------------------------------------------------------------------------------------------------------------
class Md(object):
num_classes = 9
inputchannel = 1
def __init__(self, data_dir, transfer_task, normlizetype="0-1"):
self.data_dir = data_dir
self.source_N = transfer_task[0]
self.target_N = transfer_task[1]
self.normlizetype = normlizetype
self.data_transforms = {
'train': Compose([
Reshape(),
Normalize(self.normlizetype),
# RandomAddGaussian(),
# RandomScale(),
# RandomStretch(),
# RandomCrop(),
Retype(),
# Scale(1)
]),
'val': Compose([
Reshape(),
Normalize(self.normlizetype),
Retype(),
# Scale(1)
])
}
def data_split(self, transfer_learning=True):
if transfer_learning:
# get source train and val
list_data = get_files(self.data_dir, self.source_N)
data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
source_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])
source_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])
# get target train and val
list_data = get_files(self.data_dir, self.target_N)
data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
target_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])
target_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])
return source_train, source_val, target_train, target_val
else:
#get source train and val
list_data = get_files(self.data_dir, self.source_N)
data_pd =
|
pd.DataFrame({"data": list_data[0], "label": list_data[1]})
|
pandas.DataFrame
|
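# Hedged sketch of the data_split step above: samples and labels go into a
# DataFrame, then train_test_split stratifies on the label column. Toy data.
import pandas as pd
from sklearn.model_selection import train_test_split
data_pd = pd.DataFrame({"data": [[0.1], [0.2], [0.3], [0.4]],
                        "label": [0, 0, 1, 1]})
train_pd, val_pd = train_test_split(data_pd, test_size=0.5,
                                    random_state=40,
                                    stratify=data_pd["label"])
print(len(train_pd), len(val_pd))  # 2 2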
# -*- coding: utf-8 -*-
import string
import pickle
import pandas as pd
from nltk import FreqDist
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords as StopWords
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
def normalize(words, language = 'english'):
# removes stopwords, lowercases, removes non-alphanumerics and stems (snowball)
# words: list of strings
# assert not isinstance(lst, basestring)
def ispunctuation(word):
punctuation = string.punctuation + "„“”—–"
for letter in word:
if not letter in punctuation:
return False
return True
stopwords = StopWords.words(language)
stemmer = SnowballStemmer(language)
#lowercase all terms
words = [w.lower() for w in words]
#remove stopwords
words = [w for w in words if not w in stopwords]
# stem (snowball)
words = [stemmer.stem(w) for w in words]
# remove all numerical terms
words = [w for w in words if not w.isnumeric()]
# remove pure punctuations
words = [w for w in words if not ispunctuation(w)]
#remove short words
words = [w for w in words if not len(w) < 3]
return words
def frequency_threshold(tokens, fqt = 10):
"""Return only those WORDS (i.e. unique wordforms) that appear more frequent than @fqt"""
fq = FreqDist(tokens)
fqt = fqt - 1
words = list( filter( lambda x: x[1] > fqt, fq.items() ) )
words = [item[0] for item in words]
return words
def remove_word_pairs_not_in_corpus(word_pairs, words, language = 'english'):
"""Only return those word pairs in @word_pairs of which both words are in @words
Expects @words to already be normalized"""
if not (word_pairs and words):
raise ValueError('Cannot remove word_pairs, array empty')
return_word_pairs = []
for pair in word_pairs:
pair_in_words = True
for word in pair:
word = normalize([word], language)[0]
if word not in words:
pair_in_words = False
if pair_in_words:
return_word_pairs.append(pair)
return return_word_pairs
def remove_words_not_in_list(word_list, words, language = 'english'):
"""Only return those strings of @word_list that are also in @words
Expects @words to already be normalized
"""
if not (word_list and words):
raise ValueError('Cannot remove word_pairs, array empty')
word_list = [w for w in word_list if normalize([w], language)[0] in words]
return word_list
def printprettymatrix(M, rns = None, cns = None, filename = None):
"""Prints a Matrix with row and columns labels
Matrix should be dense.
Arguments:
M -- Matrix to print
rns -- Row labels
cns -- Column labels
filename -- Optional file name (default None)
"""
df = pd.DataFrame(M, columns=cns, index=rns)
pd.set_option('display.max_columns', None)
|
pd.set_option('display.max_rows', None)
|
pandas.set_option
|
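# Hedged sketch: printprettymatrix above widens the pandas display options so a
# dense term-by-term matrix prints without column/row truncation.
import numpy as np
import pandas as pd
M = np.arange(6).reshape(2, 3)
df = pd.DataFrame(M, columns=['t1', 't2', 't3'], index=['doc1', 'doc2'])
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
print(df)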
import pandas as pd
import transformers
import datasets
import shap
import pickle
dataset = datasets.load_dataset("emotion", split="train")
data =
|
pd.DataFrame({"text": dataset["text"], "emotion": dataset["label"]})
|
pandas.DataFrame
|
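# Hedged sketch (stand-in rows, not the real "emotion" split): the frame built
# for the SHAP example above is just two parallel columns, text and label id.
import pandas as pd
data = pd.DataFrame({"text": ["i feel great today", "this is awful"],
                     "emotion": [1, 0]})
print(data.head())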
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 13:56:29 2017
@author: ning
"""
import pandas as pd
import os
import numpy as np
#from sklearn.model_selection import StratifiedKFold,KFold
#from sklearn.pipeline import Pipeline
#from sklearn.preprocessing import StandardScaler
#from sklearn.linear_model import LogisticRegressionCV
#from sklearn.metrics import roc_curve,precision_recall_curve,auc,average_precision_score,confusion_matrix
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size':22})
matplotlib.rcParams['legend.numpoints'] = 1
import seaborn as sns
sns.set_style('white')
try:
function_dir = 'D:\\NING - spindle\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
except:
function_dir = 'C:\\Users\\ning\\OneDrive\\python works\\Spindle_by_Graphical_Features'
os.chdir(function_dir)
#import eegPipelineFunctions
try:
file_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
# file_dir = 'D:\\NING - spindle\\training set\\road_trip_more_channels\\'
os.chdir(file_dir)
except:
file_dir = 'C:\\Users\\ning\\Downloads\\road_trip\\'
# file_dir = 'C:\\Users\\ning\\Downloads\\road_trip_more_channels\\'
os.chdir(file_dir)
def average(x):
x = x[1:-1].split(', ')
x = np.array(x,dtype=float)
return np.nanmean(x)
figsize = 6
signal_features_indivisual_results_RF=pd.read_csv(file_dir+'individual_signal_feature_RF.csv')
signal_features_indivisual_results_RF['clf']='Random Forest'
graph_features_indivisual_results_RF=
|
pd.read_csv(file_dir+'individual_graph_feature_RF.csv')
|
pandas.read_csv
|
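# Hedged sketch of the average() helper defined above: the CSV stores lists as
# strings like "[0.8, 0.9, nan]", which are parsed and reduced with nanmean.
import numpy as np
def average(x):
    x = x[1:-1].split(', ')
    x = np.array(x, dtype=float)
    return np.nanmean(x)
print(average('[0.8, 0.9, nan]'))  # ~0.85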
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict, index=new_index)
|
tm.assert_frame_equal(result, expected, check_dtype=False)
|
pandas.util.testing.assert_frame_equal
|
#/*##########################################################################
# Copyright (C) 2020-2021 The University of Lorraine - France
#
# This file is part of the PyRecon toolkit developed at the GeoRessources
# Laboratory of the University of Lorraine, France.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
# -*- coding: utf-8 -*-
from os.path import join
from pkgs.SpectraPreProcessing import *
import numpy as np
import pandas as pd
import os
import re
import json
import collections  # needed for collections.Counter in SelectXrfSamplesName
'''
Helpers for reading XRF spectrum csv exports and extracting per-sample spectrums.
'''
class XRF(SpectraPreProcessing):
def __init__(self, AdressFolder, Name):
SpectraPreProcessing.__init__(self,AdressFolder)
self.Name = Name
def NameSpectrumColumns(self):
'''Return a list of tuples containing the name of each spectrum column.'''
ColomnsName = ["KeV", "Main Range", "Low Range", "High Range", "Light Range"]
Name = [self.NameColumn(SampleName = self.Name, ColomnName = ["_",w]) for w in ColomnsName]
return Name
def Read_spectrum(self):
'''Read the XRF spectrum csv file into a DataFrame.'''
File = self.FilePath(self.Name)
Sep = self.Separator(Extension = self.FileExtension(self.Name))
df = pd.read_csv(File, header = 0, sep = Sep, index_col = False,low_memory=False)
return df
#def Read_spectrums(self):
# ''' Returns Data Frame that concatenate all spectrums in the folder. '''
# global df
# Names = self.NameFiles()
# DF = [XRF(self.AdressFolder, Names[i]).Read_spectrum() for i in range(len(Names))]
# df = DF[0][DF[0].columns[0]].to_frame()
# for i in range(len(DF)):
# df = df.join(DF[i][DF[i].columns[1::]])
# return df
######################################### Extruction of Spectrums #########################################
def SelectXrfSamplesName(self):
'''
Return the sample names found in the XRF spectrums.
> DataFrame : the imported csv file containing all XRF spectrums.
'''
DataFrame = self.Read_spectrum()
id_sample = [name[0:name.find("(")] for name in DataFrame.columns[1::]]
SamplesName = [item for item, count in collections.Counter(id_sample).items()]
return SamplesName
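# Standalone sketch of the header parsing used above. The column names are
# hypothetical examples of the "SampleName(FilterName)" pattern the code
# assumes; real headers come from the XRF csv export and may differ:
#
#   import collections
#   columns = ["S01(Main Range)", "S01(Low Range)", "S02(Main Range)"]
#   ids = [name[0:name.find("(")] for name in columns]
#   samples = [item for item, count in collections.Counter(ids).items()]
#   # samples == ['S01', 'S02']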
def SelectXrfColumnsName(self):
'''
Return, for each sample, the list of column names belonging to that sample.
> DataFrame : the imported csv file containing all XRF spectrums.
> SamplesName : the sample names generated by the function "SelectXrfSamplesName".
'''
DataFrame = self.Read_spectrum()
SamplesName = self.SelectXrfSamplesName()
NameColumns = list()
for i in range(len(SamplesName)):
NameColumns.append([name for name in DataFrame.columns.to_list() if name.startswith(SamplesName[i] + "(") ])
return NameColumns
def SelectXrfColumnsFilters(self):
'''
Return, for each sample, the filter names (the part in parentheses) of its columns.
'''
DataFrame = self.Read_spectrum()
SamplesName = self.SelectXrfSamplesName()
NameFIlters = list()
for i in range(len(SamplesName)):
NameFIlters.append([name[len(SamplesName[i])::] for name in DataFrame.columns.to_list() if name.startswith(SamplesName[i]+ "(") ])
return NameFIlters
def NameXRFSpectrumColumns(self,SamplesName,ColumnsFilters):
'''
Return a list of tuples used to name each XRF spectrum column of a sample.
'''
listOfTuples = [tuple([SamplesName,"_",w]) for w in ColumnsFilters]
return listOfTuples
def ExtractSpectrums(self):
'''
Return a list of all spectrums.
'''
DataFrame = self.Read_spectrum()
SamplesName = self.SelectXrfSamplesName()
ColumnsName = self.SelectXrfColumnsName()
ColumnsFilters = self.SelectXrfColumnsFilters()
Names = [self.NameXRFSpectrumColumns(SamplesName[i],ColumnsFilters[i]) for i in range(len(SamplesName))]
first_column_name = pd.MultiIndex.from_tuples([("_","_","kev")])
first_column =
|
pd.DataFrame(DataFrame[DataFrame.columns[0]].values, columns=first_column_name)
|
pandas.DataFrame
|
from scipy.optimize import fsolve as _fsolve
from scipy import signal as _signal
import pandas as pd
import numpy as np
from scipy import stats
### Spectrum
def elevation_spectrum(eta, sample_rate, nnft, window='hann',
detrend=True, noverlap=None):
"""
Calculates the wave energy spectrum from wave elevation time-series
Parameters
------------
eta: pandas DataFrame
Wave surface elevation [m] indexed by time [datetime or s]
sample_rate: float
Data frequency [Hz]
nnft: integer
Number of bins in the Fast Fourier Transform
window: string (optional)
Signal window type. 'hann' is used by default given the broadband
nature of waves. See scipy.signal.get_window for more options.
detrend: bool (optional)
Specifies if a linear trend is removed from the data before
calculating the wave energy spectrum. Data is detrended by default.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
Returns
---------
S: pandas DataFrame
Spectral density [m^2/Hz] indexed by frequency [Hz]
"""
# TODO: Add confidence intervals, equal energy frequency spacing, and NDBC
# frequency spacing
# TODO: may need an assert for the length of nnft- signal.welch breaks when nfft is too short
# TODO: check for uniform sampling
assert isinstance(eta, pd.DataFrame), 'eta must be of type pd.DataFrame'
assert isinstance(sample_rate, (float,int)), 'sample_rate must be of type int or float'
assert isinstance(nnft, int), 'nnft must be of type int'
assert isinstance(window, str), 'window must be of type str'
assert isinstance(detrend, bool), 'detrend must be of type bool'
assert nnft > 0, 'nnft must be > 0'
assert sample_rate > 0, 'sample_rate must be > 0'
S = pd.DataFrame()
for col in eta.columns:
data = eta[col]
if detrend:
data = _signal.detrend(data.dropna(), axis=-1, type='linear', bp=0)
[f, wave_spec_measured] = _signal.welch(data, fs=sample_rate, window=window,
nperseg=nnft, nfft=nnft, noverlap=noverlap)
S[col] = wave_spec_measured
S.index=f
S.columns = eta.columns
return S
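# Hedged usage sketch (not part of the original module): shows how
# elevation_spectrum might be called on a synthetic record; the sample rate,
# record length, column name and nnft below are illustrative assumptions.
def _example_elevation_spectrum():
    fs = 10.0                                  # sample rate [Hz]
    t = np.arange(0, 600, 1/fs)                # 10-minute synthetic record
    eta = pd.DataFrame({'wave1': 0.5*np.sin(2*np.pi*0.1*t)}, index=t)
    S = elevation_spectrum(eta, sample_rate=fs, nnft=1024)
    # S holds spectral density [m^2/Hz] indexed by frequency; for this
    # synthetic record it peaks near 0.1 Hz.
    return S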
def pierson_moskowitz_spectrum(f, Tp, Hs):
"""
Calculates Pierson-Moskowitz Spectrum from IEC TS 62600-2 ED2 Annex C.2 (2019)
Parameters
------------
f: numpy array
Frequency [Hz]
Tp: float/int
Peak period [s]
Hs: float/int
Significant wave height [m]
Returns
---------
S: pandas DataFrame
Spectral density [m^2/Hz] indexed by frequency [Hz]
"""
try:
f = np.array(f)
except:
pass
assert isinstance(f, np.ndarray), 'f must be of type np.ndarray'
assert isinstance(Tp, (int,float)), 'Tp must be of type int or float'
assert isinstance(Hs, (int,float)), 'Hs must be of type int or float'
f.sort()
B_PM = (5/4)*(1/Tp)**4
A_PM = B_PM*(Hs/2)**2
Sf = A_PM*f**(-5)*np.exp(-B_PM*f**(-4))
col_name = 'Pierson-Moskowitz ('+str(Tp)+'s)'
S = pd.DataFrame(Sf, index=f, columns=[col_name])
return S
def jonswap_spectrum(f, Tp, Hs, gamma=None):
"""
Calculates JONSWAP Spectrum from IEC TS 62600-2 ED2 Annex C.2 (2019)
Parameters
------------
f: numpy array
Frequency [Hz]
Tp: float/int
Peak period [s]
Hs: float/int
Significant wave height [m]
gamma: float (optional)
Gamma
Returns
---------
S: pandas DataFrame
Spectral density [m^2/Hz] indexed by frequency [Hz]
"""
try:
f = np.array(f)
except:
pass
assert isinstance(f, np.ndarray), 'f must be of type np.ndarray'
assert isinstance(Tp, (int,float)), 'Tp must be of type int or float'
assert isinstance(Hs, (int,float)), 'Hs must be of type int or float'
assert isinstance(gamma, (int,float, type(None))), \
'gamma must be of type int or float'
f.sort()
B_PM = (5/4)*(1/Tp)**4
A_PM = B_PM*(Hs/2)**2
S_f = A_PM*f**(-5)*np.exp(-B_PM*f**(-4))
if not gamma:
TpsqrtHs = Tp/np.sqrt(Hs)
if TpsqrtHs <= 3.6:
gamma = 5
elif TpsqrtHs > 5:
gamma = 1
else:
gamma = np.exp(5.75 - 1.15*TpsqrtHs)
# Cutoff frequencies for gamma function
siga = 0.07
sigb = 0.09
fp = 1/Tp # peak frequency
lind = np.where(f<=fp)
hind = np.where(f>fp)
Gf = np.zeros(f.shape)
Gf[lind] = gamma**np.exp(-(f[lind]-fp)**2/(2*siga**2*fp**2))
Gf[hind] = gamma**np.exp(-(f[hind]-fp)**2/(2*sigb**2*fp**2))
C = 1- 0.287*np.log(gamma)
Sf = C*S_f*Gf
col_name = 'JONSWAP ('+str(Hs)+'m,'+str(Tp)+'s)'
S = pd.DataFrame(Sf, index=f, columns=[col_name])
return S
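# Illustrative check (not part of the original module): with gamma = 1 the
# peak-enhancement factor G(f) and the constant C = 1 - 0.287*ln(gamma) both
# reduce to 1, so the JONSWAP spectrum collapses onto Pierson-Moskowitz.
# The sea state (Tp, Hs) below is an arbitrary example.
def _example_jonswap_vs_pm():
    f = np.linspace(0.01, 1.0, 100)   # frequency grid [Hz]
    Tp, Hs = 8.0, 2.5                 # assumed peak period [s] and wave height [m]
    S_pm = pierson_moskowitz_spectrum(f, Tp, Hs)
    S_js = jonswap_spectrum(f, Tp, Hs, gamma=1)
    assert np.allclose(S_pm.values.squeeze(), S_js.values.squeeze())
    return S_pm, S_js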
### Metrics
def surface_elevation(S, time_index, seed=None, frequency_bins=None, phases=None):
"""
Calculates wave elevation time-series from spectrum
Parameters
------------
S: pandas DataFrame
Spectral density [m^2/Hz] indexed by frequency [Hz]
time_index: numpy array
Time used to create the wave elevation time-series [s],
for example, time = np.arange(0,100,0.01)
seed: int (optional)
Random seed used to generate the component phases when `phases` is not given
frequency_bins: numpy array or pandas DataFrame (optional)
Bin widths for each frequency in S; required when the frequencies are not evenly spaced
phases: numpy array or pandas DataFrame (optional)
Explicit phases for frequency components (overrides seed)
for example, phases = np.random.rand(len(S)) * 2 * np.pi
Returns
---------
eta: pandas DataFrame
Wave surface elevation [m] indexed by time [s]
"""
time_index = np.array(time_index)
assert isinstance(S, pd.DataFrame), 'S must be of type pd.DataFrame'
assert isinstance(time_index, np.ndarray), ('time_index must be of type '
'np.ndarray')
assert isinstance(seed, (type(None),int)), 'seed must be of type int'
assert isinstance(frequency_bins, (type(None), np.ndarray, pd.DataFrame)), (
"frequency_bins must be of type None, np.ndarray, or pd.DataFrame")
assert isinstance(phases, (type(None), np.ndarray, pd.DataFrame)), (
'phases must be of type None, np.ndarray, or pd.DataFrame')
if frequency_bins is not None:
assert frequency_bins.squeeze().shape == (S.squeeze().shape[0],),(
'shape of frequency_bins must match shape of S')
if phases is not None:
assert phases.squeeze().shape == S.squeeze().shape,(
'shape of phases must match shape of S')
f = pd.Series(S.index)
f.index = f
if frequency_bins is None:
delta_f = f.values[1]-f.values[0]
assert np.allclose(f.diff()[1:], delta_f)
elif isinstance(frequency_bins, np.ndarray):
delta_f = pd.Series(frequency_bins, index=S.index)
elif isinstance(frequency_bins, pd.DataFrame):
assert len(frequency_bins.columns) == 1, ('frequency_bins must only '
'contain 1 column')
delta_f = frequency_bins.squeeze()
if phases is None:
np.random.seed(seed)
phase = pd.DataFrame(2*np.pi*np.random.rand(S.shape[0], S.shape[1]),
index=S.index, columns=S.columns)
elif isinstance(phases, np.ndarray):
phase = pd.DataFrame(phases, index=S.index, columns=S.columns)
elif isinstance(phases, pd.DataFrame):
phase = phases
omega = pd.Series(2*np.pi*f)
omega.index = f
# Wave amplitude times delta f
A = 2*S
A = A.multiply(delta_f, axis=0)
A = np.sqrt(A)
# Product of omega and time
B = np.outer(time_index, omega)
B = B.reshape((len(time_index), len(omega)))
B = pd.DataFrame(B, index=time_index, columns=omega.index)
# wave elevation
eta =
|
pd.DataFrame(columns=S.columns, index=time_index)
|
pandas.DataFrame
|
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
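# Illustrative note (not part of the original source): in this generation of
# pandas a DataFrame's columns are stored in a BlockManager as the
# dtype-homogeneous blocks defined below, e.g. (exact repr may differ):
#
#   >>> df = pd.DataFrame({'i': [1, 2], 'f': [1.0, 2.0], 'o': ['a', 'b']})
#   >>> df._data.blocks
#   (FloatBlock: ..., IntBlock: ..., ObjectBlock: ...)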
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astyping
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
# pseudo broadcast (it's a 2d vs 1d, say, and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here; s can be either a string
# or null, and if it's null it gets returned unchanged
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routine
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
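    # Editor's note (illustrative, derived from the method above): both sides of
    # a comparison are viewed as int64 nanoseconds, e.g. a Timestamp becomes its
    # .asm8 value viewed as 'i8', and NaN/NaT become tslib.iNaT, so they
    # round-trip back to NaT in _try_coerce_result below.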
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
            mask[mask.cumsum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
            output += u('\n%s') % com.pprint_thing(block)
        return output
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder with geotiff raster data as an input, creates a netcdf
    file, and optionally exports the data back to geotiffs.
'''
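    # Editor's note -- a hypothetical invocation (all paths and parameter
    # values below are placeholders, not from the original module):
    #
    #   run_HANTS(rasters_path_inp='/data/ndvi_tifs', name_format='NDVI_{0}.tif',
    #             start_date='2016-01-01', end_date='2016-12-31',
    #             latlim=[30.0, 33.0], lonlim=[34.0, 36.0], cellsize=0.01,
    #             nc_path='/data/ndvi_hants.nc',
    #             nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
    #             fet=0.05, dod=1, delta=0.25)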
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
This function creates a netcdf file from a folder with geotiffs rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
    lat_ls = lat_ls[::-1]  # flip so latitudes run north to south (raster/ArcGIS convention)
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
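    # Editor's note -- hypothetical usage (file name and parameters are
    # placeholders):
    #   df = HANTS_singlepoint('/data/ndvi_hants.nc', point=(35.0, 31.5),
    #                          nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
    #                          fet=0.05, dod=1, delta=0.25)
    # `point` is (lon, lat); the returned DataFrame has 'time', 'original'
    # and 'hants' columns.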
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
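    # Editor's note -- a sketch of the fit performed below, derived from the
    # code: y is modelled as a truncated Fourier series with base period nb,
    #     y(t) ~ a0 + sum_{k=1..nf} [a_k*cos(2*pi*k*t/nb) + b_k*sin(2*pi*k*t/nb)],
    # solved by weighted least squares on the design matrix `mat` (weights `p`,
    # plus a small ridge term `delta` that is not applied to the constant term).
    # After each fit, the samples deviating most from the curve on the side
    # selected by HiLo are dropped (weight set to 0 and flagged in `outliers`);
    # iteration stops once the largest deviation is <= `fet` or the allowed
    # number of rejections (ni - nr - dod) has been used up.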
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = pd.np.sum(p == 0)
if nout > noutmax:
if pd.np.isclose(y, fill_val).any():
ready = pd.np.array([True])
yr = y
outliers = pd.np.zeros((y.shape[0]), dtype=int)
outliers[:] = fill_val
else:
raise Exception('Not enough data points.')
else:
ready = pd.np.zeros((y.shape[0]), dtype=bool)
nloop = 0
nloopmax = ni
while ((not ready.all()) & (nloop < nloopmax)):
nloop += 1
za = pd.np.matmul(mat, p*y)
A = pd.np.matmul(pd.np.matmul(mat, pd.np.diag(p)),
pd.np.transpose(mat))
A = A + pd.np.identity(nr)*delta
A[0, 0] = A[0, 0] - delta
zr = pd.np.linalg.solve(A, za)
yr = pd.np.matmul(pd.np.transpose(mat), zr)
diffVec = sHiLo*(yr-y)
err = p*diffVec
err_ls = list(err)
err_sort = deepcopy(err)
err_sort.sort()
rankVec = [err_ls.index(f) for f in err_sort]
maxerr = diffVec[rankVec[-1]]
ready = (maxerr <= fet) | (nout == noutmax)
if (not ready):
i = ni - 1
j = rankVec[i]
while ((p[j]*diffVec[j] > 0.5*maxerr) & (nout < noutmax)):
p[j] = 0
outliers[0, j] = 1
nout += 1
i -= 1
if i == 0:
j = 0
else:
j = 1
return [yr, outliers]
def export_tiffs(rasters_path_out, nc_path, name_format,
export_hants_only=False):
'''
This function exports the output of the HANTS analysis.
If 'export_hants_only' is False (default), the output rasters have the best
value available. Therefore, the cells in the output rasters will have the
original value for the cells that are not outliers and the hants values for
the cells that are outliers or the cells where data is not available.
If 'export_hants_only' is True, the exported rasters have the values
    obtained by the HANTS algorithm, disregarding the original values.
'''
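    # Editor's note -- hypothetical usage (paths are placeholders):
    #   export_tiffs('/data/ndvi_out', '/data/ndvi_hants.nc', 'NDVI_{0}.tif')
    # One geotiff is written per time step, named via `name_format`.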
# Print
print('Exporting...')
# Create folders
if not os.path.exists(rasters_path_out):
os.makedirs(rasters_path_out)
# Read time data
nc_file = netCDF4.Dataset(nc_path, 'r')
time_var = nc_file.variables['time'][:]
nc_file.close()
# Output type
if export_hants_only:
variable_selected = 'hants_values'
else:
variable_selected = 'combined_values'
# Loop through netcdf file
for yyyymmdd in time_var:
print('\t{0}'.format(yyyymmdd))
output_name = rasters_path_out + os.sep + name_format.format(yyyymmdd)
NetCDF_to_Raster(input_nc=nc_path, output_tiff=output_name,
ras_variable=variable_selected,
x_variable='longitude', y_variable='latitude',
crs={'variable': 'crs', 'wkt': 'crs_wkt'},
time={'variable': 'time', 'value': yyyymmdd})
# Return
print('Done')
return rasters_path_out
def plot_point(nc_path, point, ylim=None):
'''
This function plots the original time series and the HANTS time series.
It can be used to assess the fit.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
    lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
# This file contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these actions:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
import re
import io
import ast
import requests
import numpy as np
import pandas as pd
import random
from typing import Any, Text, Dict, List, Union, Optional
from rasa_sdk import Action, Tracker
from rasa_sdk import FormValidationAction
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.types import DomainDict
from rasa_sdk.executor import CollectingDispatcher
import warnings
from statistics import mean
from os import path, getenv
from datetime import datetime
import matplotlib.pyplot as plt
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError
import boto3
DB_AWS_ACCESS_KEY_ID = getenv('DB_AWS_ACCESS_KEY_ID')
DB_AWS_SECRET_ACCESS_KEY = getenv('DB_AWS_SECRET_ACCESS_KEY')
DB_AWS_BUCKET = 'journeypic'
# ------------------------------------------------------------------
def upload_file_to_s3(local_file, s3_folder, s3_file, aws_access_key_id, aws_secret_access_key, aws_bucket,
debug_en=False):
""" upload a given file to given location on Amazon-S3 """
success = True
HTTP_OK = 200
# Connect to Amazon-S3 client:
s3_client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
# Make a new directory on S3 (if not already exists):
if s3_folder + '/' in [x['Key'] for x in s3_client.list_objects(Bucket=aws_bucket)['Contents']]:
pass
elif not debug_en:
res = s3_client.put_object(Bucket=aws_bucket, Key='%s/' % s3_folder)
success = res['ResponseMetadata']['HTTPStatusCode'] == HTTP_OK
if not success:
return success, ""
# Upload local_file to S3:
if not debug_en:
try:
if path.exists(local_file):
s3_client.upload_file(local_file, aws_bucket, path.join(s3_folder, s3_file))
s3_client.put_object_acl(ACL='public-read', Bucket=aws_bucket, Key=path.join(s3_folder, s3_file))
except (ClientError, S3UploadFailedError) as e:
            success = False
return success, "https://%s.s3.eu-central-1.amazonaws.com/%s/%s" % (aws_bucket, s3_folder, s3_file)
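# Editor's note -- hypothetical usage of upload_file_to_s3 (folder and file
# names are placeholders, and valid AWS credentials are assumed):
#   ok, url = upload_file_to_s3(local_file='donut_image1.png',
#                               s3_folder='auto_generated',
#                               s3_file='donut_image1.png',
#                               aws_access_key_id=DB_AWS_ACCESS_KEY_ID,
#                               aws_secret_access_key=DB_AWS_SECRET_ACCESS_KEY,
#                               aws_bucket=DB_AWS_BUCKET)
# On success, `ok` is True and `url` is the public HTTPS URL of the object.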
# ------------------------------------------------------------------
def donut_generator(names, sizes, radius=0.7, textstr_title='',
colors=None, figname="image.png"):
if colors is None:
colors = []
my_circle = plt.Circle((0, 0), radius, color='white')
fig, ax = plt.subplots()
labels = [':%s\nתוירולק %d' % (k1, k2) for k1, k2 in zip(names, sizes)]
if colors:
ax.pie(sizes, colors=colors)
else:
ax.pie(sizes)
plt.legend(bbox_to_anchor=(1.0, 0.88), fontsize=18, labels=labels)
p = plt.gcf()
p.gca().add_artist(my_circle)
if textstr_title:
ax.text(0.34, 1.05, textstr_title, transform=ax.transAxes, weight='bold',
fontsize=30, verticalalignment='center_baseline')
textstr_center1 = str(sum(sizes))
textstr_center2 = 'קלוריות'[::-1]
ax.text(0.39, 0.56, textstr_center1, transform=ax.transAxes, weight='bold',
fontsize=24, verticalalignment='center_baseline')
ax.text(0.37, 0.44, textstr_center2, transform=ax.transAxes,
fontsize=18, verticalalignment='center_baseline')
if figname:
fig.patch.set_facecolor('white')
fig.savefig(figname, bbox_inches='tight', facecolor='white')
else:
plt.show()
# ------------------------------------------------------------------
def donut_generator_wrapper(title, data):
names = [x[::-1] for x in list(data.keys())]
sizes = list(data.values())
colors = ['darkorange', 'lightgreen', 'blue']
textstr_title = title[::-1]
figname = "donut_image1.png"
donut_generator(names=names,
sizes=sizes,
radius=0.7,
textstr_title=textstr_title,
colors=colors,
figname=figname)
return figname
# ------------------------------------------------------------------
def iniliatize_Diagram(title, data):
unique_filename = lambda fname: "%s_%s%s" % (path.splitext(fname)[0],
datetime.now().strftime("%m%d%Y_%H%M%S"),
path.splitext(fname)[1])
figname = donut_generator_wrapper(title, data)
res, figure_url = upload_file_to_s3(local_file=figname,
s3_folder="auto_generated",
s3_file=unique_filename(figname),
aws_access_key_id=DB_AWS_ACCESS_KEY_ID,
aws_secret_access_key=DB_AWS_SECRET_ACCESS_KEY,
aws_bucket=DB_AWS_BUCKET)
return figure_url
# ------------------------------------------------------------------
def load_db(db_bitmap):
db_dict = {}
# "Zameret food list 22_JAN_2020"
if (db_bitmap & 0x1) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=84892416"
s = requests.get(url).content
db_dict['tzameret'] = pd.read_csv(io.StringIO(s.decode('utf-8'))).fillna(0)
# "Zameret_hebrew_features" - entities aliases
if (db_bitmap & 0x2) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1805881936"
s = requests.get(url).content
db_dict['lut'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity Alias"],
usecols=["Entity Alias", "Entity", "Units",
"Entity name", "RDA name",
"action_simple_question",
"action_nutrition_howmanyxiny_x",
"action_nutrition_howmanyxiny_y",
"action_nutrition_is_food_healthy",
"action_nutrition_is_food_recommended",
"action_nutrition_what_is_healthier_x",
"action_nutrition_what_is_healthier_y",
"action_nutrition_get_rda",
"action_nutrition_bloodtest_generic",
"action_nutrition_bloodtest_value",
"action_nutrition_food_substitute",
"action_nutrition_compare_foods",
"action_nutrition_howmanyxyinz"]).fillna(0)
# "Zameret_hebrew_features" - nutrients_questions
if (db_bitmap & 0x4) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1706335378"
s = requests.get(url).content
db_dict['nutrients_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity"]).fillna(0)
# "Zameret_hebrew_features" - Food questions
if (db_bitmap & 0x8) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1099284657"
s = requests.get(url).content
db_dict['food_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["nutrition_density"],
usecols=["nutrition_density", "energy_density",
"description_density"]).fillna(0)
# "Zameret_hebrew_features" - List of common foods
if (db_bitmap & 0x10) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=495295419"
s = requests.get(url).content
db_dict['common_food'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["common_name"],
usecols=["common_name", "shmmitzrach", "smlmitzrach"]).fillna(0)
# "Newt Machine Readable" - FoodItemRanges
if (db_bitmap & 0x20) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=885087351"
s = requests.get(url).content
db_dict['food_ranges'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Nutrient"],
usecols=["Nutrient", "Medium - threshold per 100gr",
"High - threshold per 100gr",
"good_or_bad", "tzameret_name", "hebrew_name"]).fillna(0)
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x40) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=222801095"
s = requests.get(url).content
micro_nutrients_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['micro_nutrients'] = micro_nutrients_df
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x80) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1373096469"
s = requests.get(url).content
food_units_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['food_units'] = food_units_df
# "Newt Machine Readable" - BloodTestValues
if (db_bitmap & 0x100) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=1011022304"
s = requests.get(url).content
bloodtest_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0, nrows=19, usecols=range(11)).fillna(0)
db_dict['bloodtest_vals'] = bloodtest_df
# "Zameret_hebrew_features" - Weight aliases
if (db_bitmap & 0x200) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=623521836"
s = requests.get(url).content
food_units_aliases_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=0)
db_dict['food_units_aliases'] = food_units_aliases_df
# "Zameret_hebrew_features" - For Noa
if (db_bitmap & 0x400) > 0:
url = "https://docs.google.com/spreadsheets/d/19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y/export?format=csv&gid=82221888"
s = requests.get(url).content
food_units_features_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=1)
db_dict['food_units_features'] = food_units_features_df.dropna(axis=0, how='all')
db_dict['food_units_features'] = db_dict['food_units_features'].rename({'Primary_SN': 'smlmitzrach'},
axis=1)
# "Zameret_hebrew_features" - subs_tags_alias
if (db_bitmap & 0x800) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=458428667"
s = requests.get(url).content
db_dict['subs_tags_alias'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
usecols=["Entity Alias", "Entity", "Show_stopers"]).set_index(
'Entity Alias')
return db_dict
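# Editor's note: db_bitmap is a bit mask selecting which Google-Sheets tabs to
# download -- e.g. load_db(0x46) == load_db(0x2 | 0x4 | 0x40) loads the
# entity-alias LUT, the nutrients Q&A table and the micronutrients table,
# which covers what get_rda() below uses.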
# ------------------------------------------------------------------
def import_sheets(debug=False):
'''Import the df noa and tzameret food group tabs from the suggested meal planning sheet as a DataFrame. Import weights and measures, and tzameret food list from Tzameret DB as a DataFrame'''
sheet_id = '19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y'
    # imported separately
gid_2 = '428717261'
df_tzameret_food_group = pd.read_csv(
f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv&gid={gid_2}")
df = load_db(0x481)
df_nutrition = df['tzameret']
df_nutrition.fillna(0, inplace=True)
df_nutrition.rename(columns={'carbohydrates': 'carbs'}, inplace=True)
df_weights = df['food_units']
df_weights.head()
df_noa_pre_1 = df['food_units_features']
df_noa = df['food_units_features']
header = list(df_noa_pre_1.columns.values)
df_noa.loc[-1] = header # adding a row
df_noa.index = df_noa.index + 1 # shifting index
df_noa = df_noa.sort_index() # sorting by index
df_noa.head()
df_noa.columns = df_noa.columns.str.lower()
    df_noa = df_noa.iloc[1:]  # df_noa does not have the first row with the numbers, to make it easier to filter data
df_noa['lactose_free'] = df_noa['lactose_free'].replace({'Low Lactose': 'Yes', 'Lactose Free': 'Yes'})
df_noa['food_category'] = df_noa['food_category'].replace({'N/A': 'Savoury_Snacks'})
df_noa.dropna(subset=["food_name"],
inplace=True) # dropping all meals that don't have a meal name to get complete list of actual meals
df_noa = df_noa.rename(columns={'smlmitzrach': 'primary_sn'})
df_noa['sn_1'] = df_noa['primary_sn'].astype(str).str[:1]
df_noa['sn_2'] = df_noa['primary_sn'].astype(str).str[1:2]
return df_noa, df_tzameret_food_group, df_weights, df_nutrition
# ------------------------------------------------------------------
def get_rda(name, tracker, intent_upper=False):
db_dict = load_db(0x46)
lut_df = db_dict['lut']
micro_nutrients_df = db_dict['micro_nutrients']
if intent_upper:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "Upper Limit"]
else:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "RDA"]
status = "match"
if not (tracker.get_slot('gender') and tracker.get_slot('age') and tracker.get_slot(
'weight') and tracker.get_slot(
'height')):
status = "default"
nutrient = None
x = tracker.get_slot('x') if tracker.get_slot('x') else None
    if x is not None and x != "":
nutrient = x
else:
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[name].values:
nutrient = ent['value']
break
try:
feature = lut_df['Entity'][nutrient]
feature_rda = lut_df['RDA name'][lut_df['Entity name'] == feature][0]
gender = "Male"
if tracker.get_slot('gender') == "זכר":
gender = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender = "Female"
user_vars = {}
user_vars['age'] = tracker.get_slot('age') if tracker.get_slot('age') else "40"
user_vars['weight'] = tracker.get_slot('weight') if tracker.get_slot('weight') else "80"
user_vars['height'] = tracker.get_slot('height') if tracker.get_slot('height') else "180"
rda_row = micro_nutrients_df[(micro_nutrients_df['Micronutrient'] == feature_rda) & \
((micro_nutrients_df['Gender'] == "ANY") | (
micro_nutrients_df['Gender'] == gender)) & \
((micro_nutrients_df['Pregnancy'] == "ANY") | (
micro_nutrients_df['Pregnancy'] == "No")) & \
((micro_nutrients_df['Lactating'] == "ANY") | (
micro_nutrients_df['Lactating'] == "No")) & \
((micro_nutrients_df['Age Min'] == "ANY") | (
micro_nutrients_df['Age Min'].astype(float) <= int(
user_vars['age']))) & \
((micro_nutrients_df['Age Max'] == "ANY") | (
micro_nutrients_df['Age Max'].astype(float) > int(user_vars['age'])))]
rda_text = str(rda_row['Free Text'].values[0])
rda_value = str(rda_row['Value'].values[0])
rda_units = rda_row['Units'].values[0]
if 'slot#' in rda_value:
rda_value_list = rda_value.split(' ')
for k, el in enumerate(rda_value_list):
if 'slot#' in el and el.split('#')[1] in user_vars:
rda_value_list[k] = user_vars[el.split('#')[1]]
rda_value = eval(' '.join(rda_value_list))
rda_value = float(rda_value)
if 'slot#' in rda_text:
rda_text_list = rda_text.split(' ')
for k, el in enumerate(rda_text_list):
if 'slot#' in el:
rda_text_list[k] = tracker.get_slot(el.split('#')[1])
rda_text = ' '.join(rda_text_list)
rda_text_list = re.findall('\{.*?\}', rda_text)
for match in rda_text_list:
rda_text = rda_text.replace(match, str(eval(match[1:-1])))
if rda_text == "0":
rda_text = ""
return rda_value, rda_units, rda_text, status, nutrient
except:
return -1, -1, "", "missmatch", nutrient
# ------------------------------------------------------------------
def get_personal_str(rda_status, tracker):
age = tracker.get_slot('age') if tracker.get_slot('age') and rda_status == "match" else '40'
gender = tracker.get_slot('gender') if tracker.get_slot('gender') and rda_status == "match" else 'זכר'
weight = tracker.get_slot('weight') if tracker.get_slot('weight') and rda_status == "match" else '80'
height = tracker.get_slot('height') if tracker.get_slot('height') and rda_status == "match" else '180'
if rda_status == "default":
personal_str = "עבור %s בגיל %s במשקל %s ובגובה %s" % (gender, age, weight, height)
else:
personal_str = "עבורך (%s בגיל %s במשקל %s ובגובה %s)" % (gender, age, weight, height)
return personal_str
# ------------------------------------------------------------------
def get_food_nutrition_density(food, food_ranges_db):
# Nutrition Density is defined in Tzameret:
density_normalized = float(food["Nutrition density normalized"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Nutrition density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
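# Editor's note -- illustrative binning (threshold numbers are hypothetical;
# the real ones come from the 'food_ranges' sheet): with a medium threshold of
# 0.2 and a high threshold of 0.4, a normalized density of 0.15 -> "low",
# 0.25 -> "med", 0.55 -> "high". get_food_energy_density() below applies the
# same scheme to the energy-density columns.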
# ------------------------------------------------------------------
def get_food_energy_density(food, food_ranges_db):
# Energy Density is defined in Tzameret:
density_normalized = float(food["Energy density"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Energy density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
# ------------------------------------------------------------------
def how_many_x_in_y_core(x, y, food_units, name, tracker):
db_dict = load_db(0x293)
y_common = y
if y in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y]['shmmitzrach'][0]
else:
y_food = ' '.join(y.split(' ')[1:])
food_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == y.split(' ')[0]][
'Zameret unit']
if food_units.empty:
food_units = y.split(' ')[0]
else:
food_units = food_units.values[0]
if y_food in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y_food]['shmmitzrach'][0]
else:
y_common = y_food
food = db_dict['tzameret'][db_dict['tzameret']['shmmitzrach'].str.contains(y_common)].iloc[0, :]
feature = db_dict['lut'][db_dict['lut'].index == x]["Entity"][0]
units = db_dict['lut'][db_dict['lut'].index == x]["Units"][0]
food_units_row = pd.Series()
if food_units:
food_units_row = db_dict['food_units'][(db_dict['food_units']['smlmitzrach'] == int(food['smlmitzrach'])) &
(db_dict['food_units']['shmmida'] == food_units)]
is_food_units_match = not food_units_row.empty or food_units == "100 גרם"
food_units_factor = 1.0
if not food_units_row.empty:
food_units_factor = food_units_row['mishkal'].values[0] / 100
val = food[feature] * food_units_factor
if units == 0:
res = "ב-%s של %s יש %.2f %s" % (food_units, food['shmmitzrach'], float(val), x)
else:
res = ""
if not is_food_units_match:
res = "לא הצלחתי למצוא נתונים במאגר על היחידה %s עליה שאלת\n" % food_units
res += "היחידות הבאות קיימות במאגר, עבור %s:\n" % food['shmmitzrach']
res += ', '.join(db_dict['food_units'][db_dict['food_units']['smlmitzrach'] == int(food['smlmitzrach'])][
'shmmida'].to_list())
res += "\n"
food_units = "100 גרם"
res += "ב-%s של %s יש %.2f %s %s" % (food_units, food['shmmitzrach'], float(val), units, x)
rda_val, rda_units, rda_text, rda_status, nutrient = get_rda(name, tracker)
if rda_val > 0 and units not in ['יחב"ל']: # FIXME: unsupported units
rda = 100 * float(val) / rda_val
res += "\n"
res += "שהם כ-%d אחוז מהקצובה היומית המומלצת %s" % (int(rda), get_personal_str(rda_status, tracker))
if rda_text and rda_text != '0':
res += '\n' + rda_text
return val, res
# ------------------------------------------------------------------
# ____ _ _ _ __ __ _
# | __ ) _ _(_) | __| | | \/ | ___ __ _| |
# | _ \| | | | | |/ _` | | |\/| |/ _ \/ _` | |
# | |_) | |_| | | | (_| | | | | | __/ (_| | |
# |____/ \__,_|_|_|\__,_| |_| |_|\___|\__,_|_|
# A dictionary of user inputs is used by the helpers below to filter the df_noa database.
def arrayToString(s):
return ' '.join([str(elem) for elem in s])
def checkDoublePattern(sentence, pattern):
temp = sentence.count(pattern)
if temp == 2:
return sentence[:sentence.find(pattern) + len(pattern)]
return sentence
def update_budgets(daily_budget, meals_num, snacks_num, weights):
    '''Takes total budget, number of meals and snacks, and weights as parameters. Returns budget for each category for every meal'''
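    # Editor's note -- a worked example (assuming the module-level
    # inputs['budget_var'] == 0.3): update_budgets(2000, 3, 2, weights) gives
    # div = 3 + 0.3*2 = 3.6, so budgets['meal'] ~= 555.6 kcal and
    # budgets['snack'] ~= 166.7 kcal; the per-category budgets are these
    # numbers scaled by the corresponding entry of `weights`
    # (Carbs/Protein/Vegetables from the meal budget, Fruits/Fat/Savoury/Sweets
    # from the snack budget).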
# change 0.3 to a user params
budgets = {}
div = (meals_num + inputs.get(
'budget_var') * snacks_num) # Is this supposed to be budget_var(0.3) times snacks num or budget_var times meals_num
if div > 0:
budgets['meal'] = round(daily_budget / div, 1)
budgets['snack'] = round(inputs.get('budget_var') * daily_budget / div, 1)
budgets['Carbs'] = round(weights[0] * budgets['meal'], 1)
budgets['Protein'] = round(weights[1] * budgets['meal'], 1)
budgets['Vegetables'] = round(weights[2] * budgets['meal'], 1)
budgets['Fruits'] = round(weights[3] * budgets['snack'], 1)
budgets['Fat'] = round(weights[4] * budgets['snack'], 1)
budgets['Fat_meal'] = round(weights[4] * budgets['meal'], 1)
budgets['Savoury_Snacks'] = round(weights[5] * budgets['snack'], 1)
budgets['Sweets'] = round(weights[6] * budgets['snack'], 1)
budgets['all'] = round(daily_budget, 1)
return budgets
def filter_meals_by_features(user_params, df_feature):
'''Takes user inputs and a Dataframe as parameters and returns a DataFrame filtered by the user inputs'''
for k, v in user_params.items():
if (v == 'Yes') and (debug['debug_en']):
df_feature = df_feature.loc[df_feature[k] == v]
return df_feature
def filter_meals_by_meal_type(df, meal_type):
'''Filters the DataFrame by the meal type to be used in making a scoreboard for each meal like breakfast, lunch etc.'''
if debug:
return df.loc[(df['il_' + meal_type] == 'Yes')]
def candidate_units_amounts(item, sn, items_type):
'''Returns the different options for mida amount and servings for each amount'''
sn_1 = int(item['sn_1'].values[0])
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
units_intersection = []
amounts_intersection = []
if items_type != 'snack':
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
max_amount_meal = df_max_meal['mida_maxAmount_meal'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
max_amount_meal_units = [int(value.split('_')[0]) for value in max_amount_meal]
max_amount_meal_amounts = [list(range(1, int(value.split('_')[1]) + 1)) for value in max_amount_meal]
for k, value in enumerate(max_amount_meal_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(max_amount_meal_amounts[k])
else:
max_amount_snack = df_max_meal['mida_maxAmount_snack'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
max_amount_snack_units = [int(value.split('_')[0]) for value in max_amount_snack]
max_amount_snack_amounts = [list(range(1, int(value.split('_')[1]) + 1)) for value in max_amount_snack]
for k, value in enumerate(max_amount_snack_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(max_amount_snack_amounts[k])
return units_intersection, amounts_intersection
def get_item_property(sn, grams, serving):
'''Returns the total item calories for each item'''
# if the mida is 700 then multiply by 100, if any other number divide by 100
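    # Editor's note -- worked example (numbers are hypothetical): for a food
    # with 250 kcal per 100 g, a unit weighing mishkal = 30 g and serving = 2,
    # total = 250 * (30 / 100) * 2 = 150 kcal.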
weights = df_weights[(df_weights['smlmitzrach'] == sn) & (df_weights['mida'] == grams)]
mishkal = weights.iloc[0]['mishkal']
if mishkal == 700:
mishkal = mishkal * 100
else:
mishkal = mishkal / 100
attribute = df_nutrition.loc[df_nutrition['smlmitzrach'] == str(int(sn))]
attribute_total = attribute.iloc[0]['food_energy']
total = attribute_total * mishkal * serving
return total, weights.iloc[0]['shmmida'], weights.iloc[0]['mishkal'], weights, serving
def update_calorie_budgets(candidate_calories, item_type, bud):
'''Updates the calories budget based on how many calories were already used'''
bud[item_type] = bud[item_type] - candidate_calories
return bud
def build_meal(meals_bank, meal_type, budget):
    # make a histogram without the penalty score by running the simulator 50 times and picking the winners; run it again with the penalty score
    '''Builds a meal from a DataFrame, meal type and budget. The meal takes an item from each category (Carbs, Protein etc.) and returns the meal, its weighted-average score and the total meal calories'''
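    # Editor's note -- sketch of the greedy procedure below, derived from the
    # code: for up to `max_meal_items` rounds, an item is sampled from each
    # relevant category, candidate unit/amount combinations are tried from the
    # largest amount down, and the first combination that fits the remaining
    # calorie budget is accepted; the budget is then reduced and the item's
    # budget utilization contributes to the meal score, from which penalties
    # (length, similarity, low utilization) are subtracted and nutrition/energy
    # bonuses are added at the end.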
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
bud = {}
meal_similarity_list = []
df_health = df_nutrition.iloc[1:]
max_meal_items = inputs.get('max_items_snack') if meal_type == 'snack' else inputs.get('max_items_meal')
nutrition_density_list = []
energy_density_list = []
meal_score = 0
score_list = []
uti_score = []
ind_score = []
score = 0
meals = []
meal_cals = 0
types = []
total_budget = budget.copy()
item_types = {'breakfast': ['Carbs', 'Protein', 'Vegetables'],
'lunch': ['Carbs', 'Protein', 'Vegetables'],
'dinner': ['Carbs', 'Protein', 'Vegetables'],
'snack': ['Fat']}
if (snacks.get('sweets') == 'Yes') & (len(meals_bank.loc[meals_bank['food_category'] == 'Sweets']) > 0):
item_types['snack'].append('Sweets')
if (snacks.get('Savoury_Snacks') == 'Yes') & (
len(meals_bank.loc[meals_bank['food_category'] == 'Savoury_Snacks']) > 0):
item_types['snack'].append('Savoury_Snacks')
if (user_params.get('fruits') == 'No') & (len(meals_bank.loc[meals_bank['food_category'] == 'Fruits']) > 0):
item_types['snack'].append('Fruits')
for k in range(max_meal_items):
for item_type in item_types[meal_type]:
success = False
if (len(meals_bank.loc[meals_bank['food_category'] == item_type]) > 0):
df = meals_bank.loc[meals_bank['food_category'] == item_type].sample()
candidate_units = candidate_units_amounts(df, int(df['primary_sn'].values[0]), item_type)
candidate_grams = candidate_units[0]
for can_grams in candidate_grams:
sn = float(df['primary_sn'].values[0])
for candidate_amount in candidate_units[1]:
for amount in reversed(candidate_amount):
calories, weight, grams, x, y = get_item_property(sn, can_grams, amount)
can_cals = getattr(calories, "tolist", lambda: candidate_calories)()
if can_cals < budget[item_type]:
success = True
if success:
if success:
sn_int = int(df['primary_sn'].astype(str).str[:1])
sn1 = float(df['primary_sn'].values[0])
calories1, weight, grams, x, y = get_item_property(sn1, can_grams, amount)
bud[item_type] = getattr(calories1, "tolist", lambda: candidate_calories)()
units_priority = candidate_grams.index(can_grams) + 1
meal_score += 1 / units_priority
df_sn1 = df_tzameret_food_group.loc[
df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_int]
df_fish = df_noa.loc[df_noa['primary_sn'] == sn1]
food_group = df_sn1['קבוצת המזון']
if sn_int == 2:
if df_fish['fish_free'].iloc[0] == 'Yes':
meal_similarity_list.append(2.1)
else:
meal_similarity_list.append(2.2)
else:
meal_similarity_list.append(sn_int)
item_score = (bud[item_type]) / (budget[item_type])
df['score'] = item_score
score_list.append(item_score)
types.append(df['food_category'])
nutrition_density_normalized = df_nutrition.loc[
df_nutrition['smlmitzrach'] == str(
int(sn1)), 'Nutrition density normalized']
energy_density = df_health.loc[
df_health['smlmitzrach'] == str(int(sn1)), 'Energy density']
nutrition_density_normalized = nutrition_density_normalized.astype(float)
energy_density = energy_density.astype(float)
dataframe = df[['food_name', 'primary_sn']]
dataframe.insert(2, 'Weight', [grams])
dataframe.insert(3, 'Unit', [weight])
dataframe.insert(4, 'Amount', [amount])
meals.append(dataframe)
nutrition_density_list.append(nutrition_density_normalized.values.tolist())
energy_density_list.append(energy_density.values.tolist())
meal_cals = meal_cals + calories1
budget = update_calorie_budgets(can_cals, item_type, budget)
break
if success or budget[item_type] < units_thr[item_type] or len(meals) >= max_meal_items:
break
if success or budget[item_type] < type_thr[item_type] or len(meals) >= max_meal_items:
break
if budget['all'] < inputs['item_thr'] or len(meals) >= max_meal_items:
break
if len(meals) >= max_meal_items:
break
types_list_no_duplicates = np.unique([x.values[0] for x in types]).tolist()
for each_type in reversed(types_list_no_duplicates):
each_score = (float(total_budget.get(each_type)) - float(budget.get(each_type))) / float(
total_budget.get(each_type))
ind_score.append(each_score)
uti_score.append(budget_weights.get(each_type))
if (len(ind_score) < len(item_types[meal_type])):
ind_score.append(0.000001)
uti_score.append(.35)
if (min(ind_score) < 0.7) and (meal_type != 'snack'):
extra_penalty = inputs.get('extra_penalty')
else:
extra_penalty = 0
if (len(meals)) > 4:
meal_penalty_length = (len(meals) - 4) * inputs.get('meal_penalty_length')
else:
meal_penalty_length = 0
total_utilization = sum(x * y for x, y in zip(ind_score, uti_score)) / sum(uti_score)
if len(meal_similarity_list) != len(set(meal_similarity_list)):
meal_similarity_penalty = inputs.get('meal_similarity_penalty')
else:
meal_similarity_penalty = 0
nutrition_density_list = [float(x) for [x] in nutrition_density_list]
try:
avg_nutrition = round(mean(nutrition_density_list), 4)
except:
avg_nutrition = nutrition_density_list
energy_density_list = [float(x) for [x] in energy_density_list]
avg_energy = round(mean(energy_density_list), 4)
penalty_score = 1 - meal_score / len(meals)
nutrition_boost = avg_nutrition * inputs.get('nutrition_bonus')
energy_boost = avg_energy * inputs.get('energy_bonus')
if scoring.get('legacy'):
score = total_utilization - (
penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length
elif scoring.get('legacy_nut'):
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + nutrition_boost
elif scoring.get('legacy_ene'):
        score = total_utilization - (
penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost
else:
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost + nutrition_boost
return meals, score, meal_cals, ind_score, meal_penalty_length, avg_nutrition, avg_energy, meal_similarity_penalty, meal_similarity_list
def build_meal_wrapper():
energy_density_listy = 0.0
meal_similarity_listy = []
nutrition_density_listy = []
meal_similarity_penaltyy = []
nutrition_density_listx = []
energy_density_listx = 0.0
meal_similarity_penaltyx = []
meal_similarity_listx = []
penalty_lengthy = []
# Builds and populates a scoreboard that sorts the meals based on their score
x = -3
pd.set_option("display.precision", 2)
max_iterations = inputs.get('max_iter')
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
budget_weights_list = []
for k, v in budget_weights.items():
budget_weights_list.append(v)
score_tracker = -2
total_cals = 0
meals = {}
user_meals_num = inputs.get('meals_num')
user_snacks_num = inputs.get('snacks_num')
filler = []
meal_types = ['breakfast', 'lunch', 'dinner']
for k in range(inputs.get('snacks_num')):
meal_types.append('snack')
features = filter_meals_by_features(user_params, df_noa)
for meal_type in meal_types:
bank = filter_meals_by_meal_type(features, meal_type)
x += 1
scoreboard = {}
for k in range(inputs.get('max_iter')):
budgets_dynamic = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'), budget_weights_list)
meal_budget = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'),
budget_weights_list)
if meal_type != 'snack':
mealy, scorey, calsy, ut_scorey, penalty_lengthy, nutrition_density_listy, energy_density_listy, meal_similarity_penaltyy, meal_similarity_listy = build_meal(
bank, meal_type, budgets_dynamic)
if mealy and scorey and min(ut_scorey) > 0:
scoreboard[meal_type] = mealy, scorey, calsy
if scoreboard[meal_type][1] > score_tracker:
score_tracker = scoreboard[meal_type][1]
total_cals = scoreboard[meal_type][2]
else:
mealx, scorex, calsx, ut_scorex, penalty_lengthx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx = build_meal(
bank, meal_type, meal_budget)
if mealx:
scoreboard[
meal_type] = mealx, scorex, calsx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx
if scoreboard:
meals[meal_type] = scoreboard[meal_type]
for meal_name, whole_meal in scoreboard.items():
df = pd.concat(whole_meal[0])
df = pd.DataFrame(df.values.reshape(1, -1))
df['score'] = float(scoreboard[meal_type][1])
df['meal_cals'] = scoreboard[meal_type][2]
if meal_name != 'snack':
df['name'] = meal_name
df['budget per meal'] = meal_budget.get('meal')
df['meal budget utilization'] = (df['meal_cals'] / df['budget per meal'])
df['average nutrition'] = nutrition_density_listy
df['average energy'] = energy_density_listy
df['meal_similarity_penalty'] = meal_similarity_penaltyy
df['meal_similarity_list'] = pd.Series([meal_similarity_listy])
df.set_index('name', drop=True, inplace=True)
else:
df['name'] = meal_name + " " + str(x)
df['budget per snack'] = budgets_dynamic.get('snack')
df['snack budget utilization'] = (df['meal_cals'] / df['budget per snack'])
df['average nutrition'] = nutrition_density_listx
df['average energy'] = energy_density_listx
df['meal_similarity_penalty'] = meal_similarity_penaltyx
df['meal_similarity_list'] = pd.Series([meal_similarity_listx])
df.set_index('name', drop=True, inplace=True)
if meal_name != 'snack':
# label each budget column as a remainder, e.g. 'carbs budget remaining'
df['meal penalty length'] = penalty_lengthy
df['carb budget per meal'] = int(meal_budget.get('Carbs'))
df['carbs budget remaining'] = budgets_dynamic.get('Carbs')
df['carb budget utilization'] = (meal_budget.get('Carbs') - budgets_dynamic.get(
'Carbs')) / meal_budget.get('Carbs')
df['protein budget per meal'] = meal_budget.get('Protein')
df['protein budget remaining'] = budgets_dynamic.get('Protein')
df['protein budget utilization'] = (meal_budget.get('Protein') - budgets_dynamic.get(
'Protein')) / meal_budget.get('Protein')
df['vegetable budget per meal'] = meal_budget.get('Vegetables')
df['vegetable budget remaining'] = budgets_dynamic.get('Vegetables')
df['vegetable budget utilization'] = (meal_budget.get('Vegetables') - budgets_dynamic.get(
'Vegetables')) / meal_budget.get('Vegetables')
df['fat budget per meal'] = meal_budget.get('Fat_meal')
df['fat budget remaining'] = budgets_dynamic.get('Fat_meal')
df['fat budget utilization'] = (meal_budget.get('Fat_meal') - budgets_dynamic.get(
'Fat_meal')) / meal_budget.get('Fat_meal')
else:
if snacks.get('sweets') == "Yes":
df['sweets budget per snack'] = meal_budget.get('Sweets')
df['sweets budget remaining'] = budgets_dynamic.get('Sweets')
df['sweets budget utilization'] = (meal_budget.get('Sweets') - budgets_dynamic.get(
'Sweets')) / meal_budget.get('Sweets')
if snacks.get('Savoury_Snacks') == 'Yes':
df['savoury budget per snack'] = meal_budget.get('Savoury_Snacks')
df['savoury budget remaining'] = budgets_dynamic.get('Savoury_Snacks')
df['savoury budget utilization'] = (meal_budget.get('Savoury_Snacks') - budgets_dynamic.get(
'Savoury_Snacks')) / meal_budget.get('Savoury_Snacks')
if user_params.get('fruits') == 'No':
df['fruits budget per snack'] = meal_budget.get('Fruits')
df['fruits budget remaining'] = budgets_dynamic.get('Fruits')
df['fruits budget utilization'] = (meal_budget.get('Fruits') - budgets_dynamic.get(
'Fruits')) / meal_budget.get('Fruits')
df['fat budget per snack'] = meal_budget.get('Fat')
df['fat budget remaining'] = budgets_dynamic.get('Fat')
df['fat budget utilization'] = (meal_budget.get('Fat') - budgets_dynamic.get(
'Fat')) / meal_budget.get('Fat')
filler.append(df)
if meal_type == 'snack':
user_snacks_num -= 1
else:
user_meals_num -= 1
budgets_dynamic = update_budgets(float(inputs.get('total_cals') - total_cals), user_meals_num, user_snacks_num,
budget_weights_list)
df_meals = pd.concat(filler)
df_final = df_meals.sort_values(by=['name', 'score'], ascending=[True, False])
df_final.rename(columns={0: "Item 1", 1: "Primary SN 1", 2: "Weight", 3: "Unit1", 4: "Amount1",
5: "Item 2", 6: "Primary SN 2", 7: "Weight", 8: "Unit2", 9: "Amount2",
10: "Item 3", 11: "Primary SN 3", 12: "Weight", 13: "Unit3", 14: "Amount3",
15: "Item 4", 16: "Primary SN 4", 17: "Weight", 18: "Unit4", 19: "Amount4"}
, inplace=True)
return df_final
def displayMeal(data, mealType, items_meal_number, snack_numbers):
menu = ""
calories = 0
# hole day menu
carbs = 0
protein = 0
vegetable = 0
if len(mealType) > 1:
for meal in mealType:
items, temp_calories, temp_carbs, temp_protein, temp_vegetable = getMeal(data, meal, items_meal_number)
calories += temp_calories
menu = menu + items
carbs = carbs + temp_carbs
protein = protein + temp_protein
vegetable = vegetable + temp_vegetable
# one meal for the user
else:
menu, calories, carbs, protein, vegetable = getMeal(data, mealType[0], items_meal_number)
return menu, carbs, protein, vegetable
snacks, calories_sn = getSnack(data, snack_numbers)
menu = menu + snacks
calories += calories_sn
menu = menu + "סך הכל קלוריות -> " + arrayToString(str(calories))
return menu, carbs, protein, vegetable
def getMeal(data, meal_type, meal_items_number):
# item[0]-> food name
# item[1]-> unit
# item[2]-> amount
dic = {'breakfast': 'ארוחת בוקר', 'lunch': 'ארוחת צהריים', 'dinner': 'ארוחת ערב'}
temp_meal = data[data.index == meal_type]
items = get_items(temp_meal, meal_items_number)
calories = temp_meal['meal_cals'].head(1).values
# calculate the nutritional values of the meal
carbs = temp_meal['carb budget per meal'].head(1).values * temp_meal['carb budget utilization'].head(1).values
protein = temp_meal['protein budget per meal'].head(1).values * temp_meal['protein budget utilization'].head(
1).values
vegetables = temp_meal['vegetable budget per meal'].head(1).values * temp_meal['vegetable budget utilization'].head(
1).values
carbs = int(carbs)
protein = int(protein)
vegetables = int(vegetables)
calories = int(calories)
if meal_items_number == 4:
return "*" + dic[meal_type] + "*:\n1. " + buildItem(items['item1']) + "\n2. " + buildItem(
items["item2"]) + "\n3. " + buildItem(
items['item3']) + "\n4. " + buildItem(
items['item4']) + "\nכמות קלוריות ->" + str(calories) + "\n\n", calories, carbs, protein, vegetables
return "*" + dic[meal_type] + "*:\n1. " + buildItem(items['item1']) + "\n2. " + buildItem(
items["item2"]) + "\n3. " + buildItem(
items['item3']) + "\nכמות קלוריות ->" + str(calories) + "\n\n", calories, carbs, protein, vegetables
def get_items(temp_meal, items_number):
meal = {}
for index in range(1, items_number + 1):
meal['item' + str(index)] = [temp_meal['Item ' + str(index)].head(1).values,
temp_meal['Unit' + str(index)].head(1).values,
temp_meal['Amount' + str(index)].head(1).values]
return meal
def getSnack(snackData, snack_number):
# get the line of each snack
snack1 = snackData[snackData.index == "snack 1"]
snack2 = snackData[snackData.index == "snack 2"]
# get the items
snack1_ = get_items(snack1, snack_number)
snack2_ = get_items(snack2, snack_number)
snack1_calories = snack1['meal_cals'].head(1).values
snack2_calories = snack2['meal_cals'].head(1).values
snack1_calories = int(snack1_calories)
snack2_calories = int(snack2_calories)
if snack_number == 2:
return "*ארוחות ביניים 1*:\n1. " + buildItem(snack1_['item1']) + "\n2. " + buildItem(
snack1_['item2']) + "\n*ארוחות ביניים 2*:\n1." + buildItem(snack2_['item1']) + "\n2. " + buildItem(
snack2_['item2']) + "\nכמות קלוריות -> " + str(
snack1_calories + snack2_calories) + "\n\n", snack1_calories + snack2_calories
return "*ארוחות ביניים *:\n1. " + buildItem(snack1_['item1']) + "\n2. " + buildItem(
snack2_['item1']) + "\nכמות קלוריות -> " + str(
snack1_calories + snack2_calories) + "\n\n", snack1_calories + snack2_calories
def buildItem(item):
if item[0] != 'NaN' and item[2] != 'NaN':
return arrayToString(item[0]) + " " + arrayToString(item[2]) + " " + arrayToString(
unitHebrew(arrayToString(item[1]), item[2]))
def unitHebrew(unit, amount):
unit_dic = {"כף": 'כפות', "מנה": 'מנות', "יחידה קטנה": 'יחידות קטנות', "פרח": 'פרחים',
"פרוסה בינונית": 'פרוסות בינונוית',
"יחידה": 'יחידות', "כף גדושה": 'כפות גדושות',
"פרוסה": 'פרוסות', "מנה קטנה": 'מנות קטנות', "יחידה בינונית": 'יחידות בינונוית', "כפית": 'כפיות',
"כוס": 'כוסות', "כוס קצוץ": 'כוסות'}
if unit not in unit_dic:
return unit
if amount > 1:
unit_temp = unit_dic[unit].strip()
if unit_temp.count(' ') == 1:
return unit_temp
unit_temp = unit_temp.replace(' ', '')
unit_temp = unit_temp[:unit_temp.find('ת') + 1] + ' ' + unit_temp[unit_temp.find('ת') + 1:]
# one word
if unit_temp.count('ת') == 1:
return unit_temp.strip()
return unit_temp
return unit
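# Illustrative behaviour of unitHebrew (worked example, not from the original
# code): unitHebrew('כף', 3) returns the plural 'כפות', while an unknown unit
# or an amount of 1 or less is returned unchanged.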
def core_fun(meal_type, title=""):
global snacks, user_params, units_thr, type_thr, budget_weights_meals, budget_weights_snacks_fruits_fat, budget_weights_savoury_snacks, budget_weights_sweets, inputs, display_user_parameter, debug
global user_meals_num, total_cals, user_snacks_num, candidate_calories, scoring
global df_noa, df_tzameret_food_group, df_weights, df_nutrition
pd.set_option("display.precision", 2)
warnings.filterwarnings("ignore")
# Dictionary that is equivalent to user inputs and filters the df_noa Database based on the inputs
user_params = {'eggs': 'No', # If eggs = Yes, filters out all the meals with eggs
'vegetables': 'No',  # If vegetables = Yes, filters out all meals with vegetables
'fruits': 'No',
# If fruits = Yes, filters out all snacks and meals with fruits and snacks don't have fruits as a category
'dairy': 'No', # If dairy = Yes, filters out all the dairy items
'beef_chicken_fish': 'No',
# If beef_chicken_fish = Yes, filters out all the meals with beef chicken or fish
# For remaining if Yes, filters only the food its for (i.e if kosher = Yes, only shows kosher food)
'kosher': 'Yes',
'halal': 'Yes',
'vegetarian': 'No',
'vegan': 'No',
'ketogenic': 'No',
'paleo': 'No',
'mediterranean': 'No',
'lactose_free': 'No',
'gluten_free': 'No',
'milk_free': 'No',
'wheat_free': 'No',
'egg_free': 'No',
'soy_free': 'No',
'tree_nut_free': 'No',
'peanut_free': 'No',
'fish_free': 'No',
'shellfish_free': 'No'}
# Dictionary to see if want to add certain snack elements to the snacks on the scoreboard
snacks = {'sweets': 'No',
'Savoury_Snacks': 'Yes'}
# Threshold for the build meal to stop looking for another item (If there are only 20 Carb calories left the meal exits the Carb code and moves to Protein):
units_thr = {'Carbs': 25,
'Protein': 10,
'Vegetables': 10,
'Fat': 25,
'Fruits': 25,
'Sweets': 25,
'Savoury_Snacks': 25}
# Another threshold for build meal to stop looking for another item in the category if there is less budget than the threshold:
type_thr = {'Carbs': 25,
'Protein': 10,
'Vegetables': 10,
'Fat': 25,
'Fruits': 25,
'Sweets': 25,
'Savoury_Snacks': 25}
# For snacks its either fruits and fat or savoury or sweets
budget_weights_meals = {'Carbs': 0.4,
'Protein': 0.5,
'Vegetables': 0.2}
budget_weights_snacks_fruits_fat = {'Fruits': 0.7,
'Fat': 0.4}
budget_weights_savoury_snacks = {'Savoury_Snacks': 1.1}
budget_weights_sweets = {'Sweets': 1.1}
scoring = {'legacy': False, # legacy scoring system composed of budget utilization
'legacy_nut': True, # legacy scoring system with a bonus based on average nutritional density
'legacy_ene': False,  # legacy scoring system with a bonus based on higher energy density
'legacy_nut_ene': False
# legacy scoring system with a bonus based on nutrition density and energy density, with higher density being better
}
# User inputs that control different variables:
inputs = {'for_noa_gid': 2106834268,
# Gid that controls which for noa tab is shown, to switch just paste another Gid
'budget_var': 0.3, # Budget variable to see the weighting for snacks and individual meals
'item_thr': 4,
# Threshold used to decide when to break out of the loop when too little total budget is left
'max_items_meal': 4, # Max amount of items per meal
'max_items_snack': 2, # Max amount of items per snack
'penalty_weight': 1,
# Penalty weight for the meal score if the meal doesn't take the first option at the intersection of mida max amount meal
'nutrition_bonus': 0.1, # Bonus multiplier for the average nutrition density
'energy_bonus': 0.2, # Bonus multiplier for the average energy density
'meal_similarity_penalty': 0.3,
# Penalty for having multiple items of the same category in the same meal
'max_iter': 7, # Number of meals for each meal type in the scoreboard
'meals_num': 3, # Number of different meal types and meals - will always be 3
'snacks_num': 2, # number of snacks in the final scoreboard
'meat_egg_same_day_penalty': 0.2,
# Penalty if the top meal has eggs or meat and another meal the same day also has eggs and meat
'extra_penalty': 0.2,  # Penalty if less than 0.7 of the budget for each category is used
'meal_penalty_length': 0.1,
# Penalty given if a meal is longer than 4 items and this is the weighting
'total_cals': 2000 # total calories in the budget for the day
}
debug = {'debug_en': True} # Used for finding bugs in code. Set to True for code to run properly
# Toggle to show the user values in a DataFrame
display_user_parameter = {'display_user_parameter': False}
df_noa, df_tzameret_food_group, df_weights, df_nutrition = import_sheets(False)
df_main = build_meal_wrapper()
items, carbs, protein, vegetable = displayMeal(df_main, meal_type, inputs['max_items_meal'],
inputs['max_items_snack'])
data = {'חלבון': protein,
'פחמימות': carbs,
'ירקות': vegetable}
url = iniliatize_Diagram(title, data)
return items, url
# ------------------------------------------------------------------
class Actionhowmanyxyinz(Action):
def name(self) -> Text:
return "action_nutrition_howmanyxyinz"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
user_msg = tracker.latest_message.get('text')
two_nutrient = None
z = None
db_dict = load_db(0x293)
prediction = tracker.latest_message
two_nutrient = prediction['entities'][0]['value']
x, y = two_nutrient.split(' ו')
x = x.strip()
y = y.strip()
regex_res = re.search('כמה (.*) יש ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
regex_res = re.search('כמה (.*) ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
regex_res = re.search('מה הכמות של (.*) ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
y = y[:len(y)]
# get the units from the user message
user_msg_temp = user_msg[user_msg.find(two_nutrient) + len(two_nutrient) + 1:len(user_msg)].replace('?', '')
food1_units = "100 גרם"
regex_units_res1 = re.search('ב(.*) של', user_msg_temp)
regex_units_res2 = re.search(' (.*) של', user_msg_temp)
if regex_units_res1:
food1_units = regex_units_res1.group(1)
elif regex_units_res2:
food1_units = regex_units_res2.group(1)
if food1_units in db_dict['food_units_aliases']['Unit Alias'].values:
food1_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food1_units][
'Zameret unit'].values[0]
try:
val1, res1 = how_many_x_in_y_core(x, z, food1_units, self.name(), tracker)
val2, res2 = how_many_x_in_y_core(y, z, food1_units, self.name(), tracker)
res1 = checkDoublePattern(res1, 'קלוריות')
res2 = checkDoublePattern(res2, 'קלוריות')
res = ''
res += res1
res += "\n"
res += res2
except:
res = "אין לי מושג כמה, מצטער!"
dispatcher.utter_message(res)
# ------------------------------------------------------------------
class Actioncompartiontwofoods(Action):
def name(self) -> Text:
return "action_nutrition_compare_foods"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
user_msg = tracker.latest_message.get('text')
entities = tracker.latest_message.get('entities')
x = None
y1 = None
y2 = None
more_or_less = 'יותר' if 'יותר' in user_msg else 'פחות'
db_dict = load_db(0x293)
for ent in entities:
if ent['entity'] in db_dict['lut']["action_nutrition_compare_foods"].values:
x = ent['value']
elif ent['entity'] in db_dict['lut']["action_nutrition_compare_foods"].values:
y1, y2 = ent['value'].split('או')
y1 = y1.strip()
y2 = y2.strip()
if not y1 or not y2:
y1, y2 = user_msg[user_msg.find(x) + len(x):len(user_msg)].split('או')
y1 = y1.strip()
y1 = y1[1:len(y1)]
y2 = y2.strip()
if 'בב' in y1:
y1 = y1[1:len(y1)]
if not y1 or not y2:
regex_res = re.search('במה יש (פחות|יותר) .* ב(.*)', user_msg.replace('?', ''))
if regex_res:
more_or_less = regex_res.group(1)
y1, y2 = regex_res.group(2).split('או')
y1 = y1.strip()
y2 = y2.strip()
food1_units = "100 גרם"
food2_units = "100 גרם"
for k, y in enumerate([y1, y2]):
regex_units_res = re.search('(.*) של (.*)', y)
if regex_units_res:
if k == 0:
food1_units = regex_units_res.group(1)
y1 = regex_units_res.group(2)
else:
food2_units = regex_units_res.group(1)
y2 = regex_units_res.group(2)
if food1_units in db_dict['food_units_aliases']['Unit Alias'].values:
food1_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food1_units][
'Zameret unit'].values[0]
if food2_units in db_dict['food_units_aliases']['Unit Alias'].values:
food2_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food2_units][
'Zameret unit'].values[0]
try:
val1, res1 = how_many_x_in_y_core(x, y1, food1_units, self.name(), tracker)
val2, res2 = how_many_x_in_y_core(x, y2, food2_units, self.name(), tracker)
ys = (y1, y2)
vals = (val1, val2)
res = 'ב%s יש %s %s' % (ys[np.argmax(vals) if more_or_less == 'יותר' else np.argmin(vals)], more_or_less, x)
res += "\n"
res += res1
res += "\n"
res += res2
except:
res = "אין לי מושג כמה, מצטער!"
dispatcher.utter_message(res)
# ------------------------------------------------------------------
class Actionwhataboutx(Action):
def name(self) -> Text:
return "action_nutrition_and_what_about_x"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# get the right actions according to the intent
intents_dict = {"nutrition_howmanyxiny": "action_nutrition_howmanyxiny",
"nutrition_meal_question": "action_nutrition_meal_question",
"nutrition_is_food_healthy": "action_nutrition_is_food_healthy",
"nutrition_get_rda": "action_nutrition_get_rda",
"nutrition_get_upper_limit": "action_nutrition_get_rda"}
user_message = tracker.latest_message.get('text')
previous_intent = tracker.get_slot('previous_intent')
try:
next_action = intents_dict[previous_intent]
# meal question
if previous_intent == "nutrition_meal_question":
return [FollowupAction(next_action), SlotSet("y", ""),
SlotSet("x", user_messge), SlotSet("previous_intent", previous_intent)]
# ------------------------------------------------
# how many x in y
if previous_intent == "nutrition_howmanyxiny":
db_dict = load_db(0x2)
lut_df = db_dict['lut']
action_name = "action_nutrition_howmanyxiny"
y = None
x = None
# get the values from the slots
food = tracker.get_slot('y') if tracker.get_slot('y') else None
nutrient = tracker.get_slot('x') if tracker.get_slot('x') else None
# get the entities from the question
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[action_name + "_x"].values:
x = ent['value']
elif ent['entity'] in lut_df[action_name + "_y"].values:
y = ent['value']
if x is None or x == "":
x = nutrient
if y is None or y == "":
y = food
return [FollowupAction(next_action),
SlotSet("x", x), SlotSet("y", y),
SlotSet("previous_intent", previous_intent)]
# ------------------------------------------------
# is x healthy
if previous_intent == "nutrition_is_food_healthy":
prediction = tracker.latest_message
entity_value = prediction['entities'][0]['value']
return [FollowupAction(next_action),
SlotSet("x", entity_value), SlotSet("y", ""),
SlotSet("previous_intent", previous_intent)]
# ------------------------------------------------
# nutrition_get_rda
if previous_intent == "nutrition_get_rda":
prediction = tracker.latest_message
entity_value = prediction['entities'][0]['value']
return [FollowupAction(next_action),
SlotSet("x", entity_value), SlotSet("y", ""),
SlotSet("previous_intent", "nutrition_get_rda")]
# ------------------------------------------------
# nutrition_get_upper_limit
if previous_intent == "nutrition_get_upper_limit":
prediction = tracker.latest_message
entity_value = prediction['entities'][0]['value']
return [FollowupAction(next_action),
SlotSet("x", entity_value), SlotSet("y", ""),
SlotSet("previous_intent", "nutrition_get_upper_limit")]
except:
dispatcher.utter_message(text="אין למושג, מצטער!")
return []
# ------------------------------------------------------------------
class Actionxcaniny(Action):
def name(self) -> Text:
return "action_nutrition_what_xcanbeiny"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
meal = ""
# loading data frame
db_dict = load_db(0x402)
lut = db_dict['lut']
df_noa = db_dict['food_units_features']
# get the meal type
message = tracker.latest_message.get('text')
if 'בוקר' in message:
meal = "IL_Breakfast"
if 'צהריים' in message:
meal = "IL_Lunch"
if 'ערב' in message:
meal = 'IL_Dinner'
# get the entity value from the bot
prediction = tracker.latest_message
entity_value = prediction['entities'][0]['value']
if entity_value == 'צמחוני':
entity = "Vegetarian"
elif entity_value == 'טבעוני':
entity = "Vegan"
elif entity_value == 'פלאו':
entity = "Vegan"
else:
# get the alias entity from the data frame
entity_temp = lut[lut.index == entity_value]
entity = str(entity_temp['Entity'].values[0])
entity2 = ""
for i in entity:
if (i >= 'a' and i <= 'z') or (i >= 'A' and i <= 'Z') or i == '_':
entity2 += i
if entity2[0].islower():
entity = entity2.capitalize()
# pick 5 random items from the filtered list
items = df_noa.loc[((df_noa[entity] == 'Yes') & (df_noa[meal] == 'Yes')), ['Food_Name', entity, meal]]
indeX = items.index.tolist()
y = ""
for i in range(1, 6):
temp = random.randint(0, len(items) - 1)
y += str(i) + ". " + str(items[items.index == indeX[temp]]['Food_Name'].values[0]) + "\n"
dispatcher.utter_message(y)
except:
dispatcher.utter_message(text="אין למושג, מצטער!")
# ------------------------------------------------------------------
class ActionMealQuestion(Action):
def name(self) -> Text:
return "action_nutrition_meal_question"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
meal = []
previous_intent = ""
message = tracker.latest_message.get('text') if tracker.latest_message.get('text') else None
title = 'תפריט יומי'
# get the question from the slot
if message is None:
message = tracker.get_slot('x') if tracker.get_slot('x') else None
if 'בוקר' in message:
previous_intent = "nutrition_meal_question"
meal = ['breakfast']
title = 'ארוחת בוקר'
elif 'צהריים' in message:
previous_intent = "nutrition_meal_question"
meal = ['lunch']
title = 'ארוחת צהריים'
elif 'ערב' in message:
previous_intent = "nutrition_meal_question"
meal = ['dinner']
title = 'ארוחת ערב'
else:
meal = ['breakfast', 'lunch', 'dinner']
try:
res, url = core_fun(meal, title)
dispatcher.utter_message(text="%s" % res, image=url)
except:
dispatcher.utter_message(text="אין למושג, מצטער!")
return [SlotSet("x", ""), SlotSet("y", ""), SlotSet("previous_intent", previous_intent)]
# ------------------------------------------------------------------
class ActionSimpleQuestion(Action):
def name(self) -> Text:
return "action_simple_question"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x6)
lut_df = db_dict['lut']
custom_df = db_dict['nutrients_qna']
user_intent = tracker.latest_message.get('intent').get('name')
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values and ent['value'] in lut_df['Entity']:
simple_entity = ent['value']
try:
feature = lut_df['Entity'][simple_entity]
if feature in custom_df.index:
res = custom_df.loc[feature][user_intent]
else:
res = custom_df[[str(s) in feature for s in custom_df.index.tolist()]][user_intent][0]
if 'slot#' in res:
res_list = res.split(' ')
for k, el in enumerate(res_list):
if 'slot#' in el:
res_list[k] = tracker.get_slot(el.split('#')[1])
res = ' '.join(res_list)
res_list = re.findall(r'\{.*?\}', res)
for match in res_list:
res = res.replace(match, str(eval(match[1:-1])))
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return []
# ------------------------------------------------------------------
class ActionGetRDAQuestion(Action):
def name(self) -> Text:
return "action_nutrition_get_rda"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
user_intent = tracker.latest_message.get('intent').get('name')
intent_upper = user_intent == 'nutrition_get_upper_limit'
previous_intent = tracker.get_slot('previous_intent') if tracker.get_slot('previous_intent') else None
if previous_intent == "nutrition_get_upper_limit" or previous_intent == "nutrition_get_rda":
intent = previous_intent
else:
intent = user_intent
rda_val, rda_units, rda_text, rda_status, nutrient = get_rda(self.name(), tracker, intent_upper)
if rda_val > 0:
intent_upper_str = "המקסימלית" if intent_upper else "המומלצת"
res = "הקצובה היומית %s של %s %s היא\r %.2f %s" % \
(intent_upper_str, nutrient, get_personal_str(rda_status, tracker), rda_val, rda_units)
res += "\r"
res += rda_text if rda_text else ""
else:
if rda_text:
res = rda_text
else:
res = "אין לי מושג, מצטער!"
dispatcher.utter_message(text="%s" % res)
return [SlotSet("previous_intent", intent), SlotSet("x", ""), SlotSet("y", "")]
# ------------------------------------------------------------------
class ActionNutritionHowManyXinY(Action):
def name(self) -> Text:
return "action_nutrition_howmanyxiny"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x293)
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
units_df = db_dict['food_units']
units_aliases_df = db_dict['food_units_aliases']
user_msg = tracker.latest_message.get('text')
user_intent = tracker.latest_message.get('intent').get('name')
intent_upper = user_intent == 'nutrition_get_upper_limit'
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
# Fetch X and Y (from slots, from entities or from regex):
y = None
x = tracker.get_slot('x') if tracker.get_slot('x') else None
if tracker.latest_message.get('entities'):
y = tracker.get_slot('y') if tracker.get_slot('y') else None
name_xy = self.name() + "_x"
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name() + "_x"].values:
x = ent['value']
name_xy = self.name() + "_x"
elif ent['entity'] in lut_df[self.name() + "_y"].values:
y = ent['value']
name_xy = self.name() + "_y"
regex_res = re.search('כמה (.*) יש ב(.*)', user_msg.replace('?', ''))
if regex_res:
x = regex_res.group(1)
y = regex_res.group(2).strip()
if not y:
regex_res = re.search('.* ב(.*)', user_msg.replace('?', ''))
if regex_res:
y = regex_res.group(1).strip()
food_units = "100 גרם"
regex_units_res = re.search('(.*) של (.*)', y) if y else None
if regex_units_res:
food_units = regex_units_res.group(1)
y = regex_units_res.group(2)
if food_units in units_aliases_df['Unit Alias'].values:
food_units = units_aliases_df[units_aliases_df['Unit Alias'] == food_units]['Zameret unit'].values[0]
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
try:
y_common = y
if y in common_df.index:
y_common = common_df[common_df.index == y]['shmmitzrach'][0]
else:
y_food = ' '.join(y.split(' ')[1:])
food_units = units_aliases_df[units_aliases_df['Unit Alias'] == y.split(' ')[0]]['Zameret unit']
if food_units.empty:
food_units = y.split(' ')[0]
else:
food_units = food_units.values[0]
if y_food in common_df.index:
y_common = common_df[common_df.index == y_food]['shmmitzrach'][0]
else:
y_common = y_food
food = db_df[db_df['shmmitzrach'].str.contains(y_common)].iloc[0, :]
feature = lut_df[lut_df.index == x]["Entity"][0]
units = lut_df[lut_df.index == x]["Units"][0]
food_units_row = pd.Series()
if food_units:
food_units_row = units_df[(units_df['smlmitzrach'] == int(food['smlmitzrach'])) &
(units_df['shmmida'] == food_units)]
is_food_units_match = not food_units_row.empty or food_units == "100 גרם"
food_units_factor = 1.0
if not food_units_row.empty:
food_units_factor = food_units_row['mishkal'].values[0] / 100
val = food[feature] * food_units_factor
# calculate the calorie breakdown for the pie chart
# 1 gram fat is 9 calories
# 1 gram protein is 4 calories
# 1 gram carb is 4 calories
fat_calories = int(food['total_fat'] * food_units_factor * 9)
protein_calories = int(food['protein'] * food_units_factor * 4)
carbs_calories = int(food['carbohydrates'] * food_units_factor * 4)
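# Illustrative check (added remark, not part of the original code): a food with
# 10 g fat, 20 g protein and 30 g carbs per 100 g yields pie-chart slices of
# 10*9 = 90, 20*4 = 80 and 30*4 = 120 kcal respectively.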
title = "ב" + y_common
data = {'שומן': fat_calories,
'פחמימות': carbs_calories,
'חלבונים': protein_calories}
url = iniliatize_Diagram(title, data)
if units == 0:
res = "ב-%s של %s יש %.2f %s" % (food_units, food['shmmitzrach'], float(val), x)
else:
res = ""
if not is_food_units_match:
res = "לא הצלחתי למצוא נתונים במאגר על היחידה %s עליה שאלת\r" % food_units
res += "היחידות הבאות קיימות במאגר, עבור %s:\r" % food['shmmitzrach']
res += ', '.join(
units_df[units_df['smlmitzrach'] == int(food['smlmitzrach'])]['shmmida'].to_list())
res += "\r"
food_units = "100 גרם"
res += "ב-%s של %s יש %.2f %s %s" % (food_units, food['shmmitzrach'], float(val), units, x)
rda_val, rda_units, rda_text, rda_status, nutrient = get_rda(name_xy, tracker, intent_upper)
if rda_val > 0 and units not in ['יחב"ל']:
rda = 100 * float(val) / rda_val
intent_upper_str = "המקסימלית" if intent_upper else "המומלצת"
res += "\r"
res += "שהם כ-%d אחוז מהקצובה היומית %s %s" % (
int(rda), intent_upper_str, get_personal_str(rda_status, tracker))
res += "\r"
res += rda_text if rda_text else ""
dispatcher.utter_message(text="%s" % res, image=url)
except:
dispatcher.utter_message(text="אין לי מושג כמה, מצטער!")
return [SlotSet("x", x), SlotSet("y", y), SlotSet("previous_intent", "nutrition_howmanyxiny")]
# ------------------------------------------------------------------
class ActionIsFoodHealthyQuestion(Action):
def name(self) -> Text:
return "action_nutrition_is_food_healthy"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x33)
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
food = ""
food_entity = ""
x = tracker.get_slot('x') if tracker.get_slot('x') else None
if x is not None and x is not "":
food = x
food_entity = x
else:
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
food = food_entity
break
try:
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
_, nutrition_density_res = get_food_nutrition_density(food, food_ranges_df)
advantages = []
disadvantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"]:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
elif row["good_or_bad"] == "bad":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["High - threshold per 100gr"])
if value > threshold:
disadvantages.append(row["hebrew_name"])
nutrition_density_normalized = float(food["Nutrition density normalized"])
if nutrition_density_res == "low":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) נמוכה" % food_entity
elif nutrition_density_res == "med":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) בינונית" % food_entity
elif nutrition_density_res == "high":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) גבוהה" % food_entity
if disadvantages:
res += ". "
res += "החסרונות של %s הם הרבה %s" % (food_entity, ", ".join(disadvantages))
if advantages:
res += ". "
res += "היתרונות של %s הם הרבה %s" % (food_entity, ", ".join(advantages))
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return [SlotSet("previous_intent", "nutrition_is_food_healthy"), SlotSet("x", ""), SlotSet("y", "")]
# ------------------------------------------------------------------
class ActionWhatIsHealthierQuestion(Action):
def name(self) -> Text:
return "action_nutrition_what_is_healthier"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x33)
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
user_msg = tracker.latest_message.get('text')
food_entity1 = None
food_entity2 = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name() + "_x"].values:
food_entity1 = ent['value']
elif ent['entity'] in lut_df[self.name() + "_y"].values:
food_entity2 = ent['value']
if not food_entity2:
regex_res = re.search('.* או (.*)', user_msg.replace('?', ''))
if regex_res:
food_entity2 = regex_res.group(1).strip()
try:
nutrition_density_cmp = []
advantages_cmp = []
disadvantages_cmp = []
for food_entity in (food_entity1, food_entity2):
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
nutrition_density, _ = get_food_nutrition_density(food, food_ranges_df)
advantages = []
disadvantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"]:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
elif row["good_or_bad"] == "bad":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["High - threshold per 100gr"])
if value > threshold:
disadvantages.append(row["hebrew_name"])
nutrition_density_cmp.append(float(food["Nutrition density normalized"]))
if disadvantages:
disadvantages_cmp.append("החסרונות של %s הם הרבה %s" % (food_entity, ", ".join(disadvantages)))
if advantages:
advantages_cmp.append("היתרונות של %s הם הרבה %s" % (food_entity, ", ".join(advantages)))
if nutrition_density_cmp[0] > nutrition_density_cmp[1]:
res = "לפי צפיפות תזונתית %s עדיף על פני %s\r" % (food_entity1, food_entity2)
elif nutrition_density_cmp[0] < nutrition_density_cmp[1]:
res = "לפי צפיפות תזונתית %s עדיף על פני %s\r" % (food_entity2, food_entity1)
else:
res = "לפי צפיפות תזונתית %s ו-%s שקולים\r" % (food_entity1, food_entity2)
if nutrition_density_cmp[0] < nutrition_density_cmp[1]:
advantages_cmp.reverse()
disadvantages_cmp.reverse()
for advantage in advantages_cmp:
if advantage:
res += "%s\r" % advantage
for disadvantage in disadvantages_cmp:
if disadvantage:
res += "%s\r" % disadvantage
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג כמה, מצטער!")
return []
# ------------------------------------------------------------------
class ActionWhatIsRecommendedQuestion(Action):
def name(self) -> Text:
return "action_nutrition_is_food_recommended"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x3b)
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
food_qna_df = db_dict['food_qna']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
break
try:
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
_, nutrition_density_res = get_food_nutrition_density(food, food_ranges_df)
_, nutrition_energy_res = get_food_energy_density(food, food_ranges_df)
description_density_row = food_qna_df[(food_qna_df.index == nutrition_density_res) &
(food_qna_df.energy_density == nutrition_energy_res)]
res = description_density_row['description_density'].values[0]
res = res.replace('var#food', food_entity)
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return []
# ------------------------------------------------------------------
class ActionEatBeforeTrainingQuestion(Action):
def name(self) -> Text:
return "action_eat_before_training"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x10)
custom_df = db_dict['common_food']
user_intent = tracker.latest_message.get('intent').get('name')
training_type = tracker.get_slot("training_type")
training_duration = tracker.get_slot("training_duration")
try:
if training_type == 'ריצת אינטרוולים':
if training_duration:
res = custom_df['Entity'][training_type + ' מעל ' + training_duration][0]
else:
res = custom_df['Entity'][training_type][0]
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return []
# ------------------------------------------------------------------
class ActionBloodtestGenericQuestion(Action):
def name(self) -> Text:
return "action_nutrition_bloodtest_generic"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x102)
lut_df = db_dict['lut']
bloodtest_df = db_dict['bloodtest_vals']
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
bloodtest_entity = ent['value']
break
try:
feature = db_dict['lut']['Entity'][bloodtest_entity]
gender_str = "Male"
if tracker.get_slot('gender') == "זכר":
gender_str = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender_str = "Female"
age = float(tracker.get_slot('age') if tracker.get_slot('age') else "40")
bloodtest_row = bloodtest_df[(bloodtest_df['Element'] == feature) & \
((bloodtest_df['Gender'] == "ANY") | (
bloodtest_df['Gender'] == gender_str)) & \
((bloodtest_df['Age min'] == "ANY") | (
bloodtest_df['Age min'].replace('ANY', -1).astype(float) <= age)) & \
((bloodtest_df['Age Max'] == "ANY") | (
bloodtest_df['Age Max'].replace('ANY', -1).astype(float) > age))]
bloodtest_type = bloodtest_row['Graph type'].values[0]
bloodtest_min = bloodtest_row['Min'].values[0]
bloodtest_thr1 = bloodtest_row['Threshold 1'].values[0]
bloodtest_thr2 = bloodtest_row['Threshold 2'].values[0]
bloodtest_max = bloodtest_row['Max'].values[0]
if bloodtest_type == 1:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f, ערך מעל %.2f נחשב חריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
elif bloodtest_type == 2:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f, ערך מתחת %.2f נחשב חריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return []
# ------------------------------------------------------------------
class ActionBloodtestValueQuestion(Action):
def name(self) -> Text:
return "action_nutrition_bloodtest_value"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0x102)
lut_df = db_dict['lut']
bloodtest_df = db_dict['bloodtest_vals']
user_msg = tracker.latest_message.get('text')
val = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in [x for x in lut_df[self.name()].values if x != 0]:
if ent['entity'] == 'integer':
val = ent['value']
else:
bloodtest_entity = ent['value']
if not val:
regex_res = re.search('האם (.*) הוא .*', user_msg.replace('?', ''))
if regex_res:
val = regex_res.group(1).strip()
try:
if not val:
raise Exception()
feature = db_dict['lut']['Entity'][bloodtest_entity]
gender_str = "Male"
if tracker.get_slot('gender') == "זכר":
gender_str = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender_str = "Female"
age = float(tracker.get_slot('age') if tracker.get_slot('age') else "40")
bloodtest_row = bloodtest_df[(bloodtest_df['Element'] == feature) & \
((bloodtest_df['Gender'] == "ANY") | (
bloodtest_df['Gender'] == gender_str)) & \
((bloodtest_df['Age min'] == "ANY") | (
bloodtest_df['Age min'].replace('ANY', -1).astype(float) <= age)) & \
((bloodtest_df['Age Max'] == "ANY") | (
bloodtest_df['Age Max'].replace('ANY', -1).astype(float) > age))]
bloodtest_type = bloodtest_row['Graph type'].values[0]
bloodtest_min = bloodtest_row['Min'].values[0]
bloodtest_thr1 = bloodtest_row['Threshold 1'].values[0]
bloodtest_thr2 = bloodtest_row['Threshold 2'].values[0]
bloodtest_max = bloodtest_row['Max'].values[0]
if bloodtest_type == 1:
if bloodtest_min <= float(val) <= bloodtest_thr1:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
elif bloodtest_type == 2:
if bloodtest_thr2 <= float(val) <= bloodtest_max:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
if bloodtest_thr1 <= float(val) <= bloodtest_thr2:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f.' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
raise Exception()
dispatcher.utter_message(text="%s" % res)
except:
dispatcher.utter_message(text="אין לי מושג, מצטער!")
return []
# ------------------------------------------------------------------
class ActionFoodSubstituteQuestion(Action):
def name(self) -> Text:
return "action_nutrition_food_substitute"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
db_dict = load_db(0xc33)
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
features_df = db_dict['food_units_features']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
subs_tags_alias_df = db_dict['subs_tags_alias']
user_msg = tracker.latest_message.get('text')
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
break
tzameret_groups_lut = {}
tzameret_groups_lut['1'] = ['1', '4'] # Milk
tzameret_groups_lut['2'] = ['1', '2', '3', '4'] # Meat
tzameret_groups_lut['3'] = ['1', '2', '3', '4'] # Eggs
tzameret_groups_lut['4'] = ['1', '4'] # Dairy
tzameret_groups_lut['5'] = ['5', '6', '7', '9'] # Snacks
tzameret_groups_lut['6'] = ['5', '6', '7', '9'] # Fruits
tzameret_groups_lut['7'] = ['5', '6', '7', '9'] # Vegetables
tzameret_groups_lut['8'] = ['8', '4'] # Fat
tzameret_groups_lut['9'] = ['5', '6', '7', '9'] # Beverages
food_energy_thr = 0.05
def get_advantages(food):
advantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"] and row["tzameret_name"] in food:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
return advantages
def get_advantages_score(food):
act = food['advantages']
ref = ast.literal_eval(food['advantages_ref'])
intersection = []
if isinstance(act, list) and isinstance(ref, list):
intersection = list(set(act) & set(ref))
return len(intersection)
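# Sketch of get_advantages_score with hypothetical values (not from the
# original data): advantages = ['חלבון', 'סידן'] together with
# advantages_ref = "['חלבון', 'ברזל']" intersect on ['חלבון'], giving a score of 1.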
try:
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food_tzameret = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
tzameret_code = int(food_tzameret['smlmitzrach'])
tzameret_code_msb = food_tzameret['smlmitzrach'][0]
food_energy = food_tzameret['food_energy']
food_features = features_df[features_df['smlmitzrach'].fillna(0).astype(int) == tzameret_code]
user_msg_feature_v = []
user_msg_feature_k = list(
set(subs_tags_alias_df.index.to_list()) & set(user_msg.replace(',', '').split(" ")))
for tag in user_msg_feature_k:
tag_df = subs_tags_alias_df[subs_tags_alias_df.index == tag]['Entity']
if not tag_df.empty:
user_msg_feature_v.append(tag_df.values[0])
food_filter_1 = db_df[db_df['smlmitzrach'].str[0].isin(tzameret_groups_lut[tzameret_code_msb])]
food_filter_2 = db_df[abs(db_df['food_energy'] - food_energy) / food_energy < food_energy_thr]
food_filter_1_2 =
|
pd.merge(food_filter_1, food_filter_2, how='inner')
|
pandas.merge
|
"""
Functions to create candidate data DataFrames
"""
import pandas as pd
pd.options.mode.chained_assignment = None
def create_df(dictionary):
'''
Function that converts a dictionary into a pandas DataFrame
Args:
dictionary: dictionary to be converted into pandas DataFrame
Returns:
created_df: pandas DataFrame
'''
created_df = pd.DataFrame.from_dict(dictionary, orient='columns')
return created_df
def merge_df(df1, df2, how='left', merge_on='id'):
'''
Function to merge two candidate DataFrames on a shared key
Args:
df1: left DataFrame to merge
df2: right DataFrame to merge
how: Type of merge to be performed
merge_on: Column or index level names to join on (single label or list)
Returns:
merged_df: merged DataFrame
'''
merged_df = pd.merge(df1, df2, how=how, on=merge_on)
return merged_df
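# Example usage of create_df and merge_df (an illustrative sketch with made-up
# columns, not part of the original module):
# candidates = create_df({'id': [1, 2], 'name': ['Ann', 'Bob']})
# activity = create_df({'id': [1, 2], 'stage': ['Applied', 'Hired']})
# merged = merge_df(candidates, activity, how='left', merge_on='id')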
def transform_df(df_to_trans):
'''
Function to transform DataFrame
Args:
df_to_trans: DataFrame to be transformed
Returns:
transformed_df: Transformed DataFrame
'''
# Rename duplicate column name to prevent error when creating SQL database
df_to_trans = df_to_trans.rename(columns={'sourced': 'is_sourced'})
# Replace and Delete columns
change_col = 'Inplannen 1e gesorek'
keep_col = 'To schedule'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Inplannen 1e gesprek'
keep_col = 'To schedule'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'inplannen 2e gesprek'
keep_col = '1st Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = '1e gesprek'
keep_col = '1st Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Interview 1'
keep_col = '1st Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Interview 2'
keep_col = '2nd Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Assessment'
keep_col = '2nd Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = '2e gesprek'
keep_col = '2nd Interview'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Aanbieding'
keep_col = 'Offer'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
change_col = 'Aangenomen'
keep_col = 'Hired'
df_to_trans[keep_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())] = \
df_to_trans[change_col][(df_to_trans[change_col].notnull()) & (df_to_trans[keep_col].isnull())]
df_to_trans.drop(change_col, axis=1, inplace=True)
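# The ten blocks above repeat one coalesce-then-drop pattern; a small helper
# (hypothetical name, shown only as a sketch and not used by the original code)
# could express it once:
# def _coalesce(df, change_col, keep_col):
#     mask = df[change_col].notnull() & df[keep_col].isnull()
#     df.loc[mask, keep_col] = df.loc[mask, change_col]
#     df.drop(change_col, axis=1, inplace=True)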
# Delete Columns
df_to_trans.drop('Test Fase', axis=1, inplace=True)
df_to_trans.drop('intern evalueren', axis=1, inplace=True)
df_to_trans.drop('Plan 1', axis=1, inplace=True)
df_to_trans.drop('Plan 2', axis=1, inplace=True)
df_to_trans.drop('Vergaarbak', axis=1, inplace=True)
# Remove only for now (will be used for source of candidate)
df_to_trans.drop('tags', axis=1, inplace=True)
# Replace np.nan with None, as None is accepted if df is written to a DB using df.to_sql
# None will only be converted to NULL in SQL if df.to_sql is used, not using executemany
# NaT is converted as None if using to_sql
df_to_trans = df_to_trans.where((pd.notnull(df_to_trans)), None)
# Convert None to 'nan' if getting errors when inserting into MySQL DB
# df.fillna(value='nan', inplace=True)
# Convert date columns into DATE columns with specified format
date_cols = [
'hired_at',
'Sourced',
'Applied',
'Shortlisted',
'Talentpool',
'Review',
'To schedule',
'1st Interview',
'2nd Interview',
'Offer',
'Hired',
'disqualified_at'
]
for date_col in date_cols:
df_to_trans[date_col] = pd.to_datetime(
|
pd.to_datetime(df_to_trans[date_col])
|
pandas.to_datetime
|
import os
import numpy as np
import pandas as pd
import xgboost as xgb
# Tally the total sales volume
def generate_sales_sum():
items_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//items.csv")
item_categories_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//item_categories.csv")
sales_train_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//sales_train.csv")
shops_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//shops.csv")
test_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//test.csv")
my_submission_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//my_submission.csv")
sample_submission_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//sample_submission.csv")
sales_train = pd.read_csv(sales_train_path)
sales_train = sales_train.drop(
sales_train[sales_train.item_price < 0].index | sales_train[sales_train.item_price >= 100000].index)
sales_train = sales_train.drop(
sales_train[sales_train.item_cnt_day < 0].index | sales_train[sales_train.item_cnt_day >= 1000].index)
sales_train_sort = sales_train.sort_values(["shop_id", "item_id", "date_block_num"], inplace=False)
sales_train_sort.to_csv(os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//123.csv"), index=False)
test = pd.read_csv(test_path)
test_sort = test.sort_values(["shop_id", "item_id"], inplace=False)
test_sort.to_csv(os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//1234.csv"), index=False)
test_idx = 0
my_submission = pd.read_csv(sample_submission_path)
# item cnt
item_cnt = 0
date_block = -1
sum_price = 0
X, y = init_x_y()
for i in range(0, len(sales_train_sort)):
if less(sales_train_sort.iloc[i, 2], test_sort.iloc[test_idx, 1], sales_train_sort.iloc[i, 3],
test_sort.iloc[test_idx, 2]):
continue
# all the data for the same shop and item
if same(sales_train_sort.iloc[i, 2], test_sort.iloc[test_idx, 1], sales_train_sort.iloc[i, 3],
test_sort.iloc[test_idx, 2]):
if date_block == -1:
date_block = sales_train_sort.iloc[i, 1]
# same month
if sales_train_sort.iloc[i, 1] == date_block:
item_cnt += sales_train_sort.iloc[i, 5]
sum_price += sales_train_sort.iloc[i, 4] * item_cnt
else:
X[date_block][2] = sum_price / item_cnt
y[date_block] = item_cnt
item_cnt = sales_train_sort.iloc[i, 5]
sum_price = sales_train_sort.iloc[i, 4] * item_cnt
date_block = sales_train_sort.iloc[i, 1]
else:
if item_cnt != 0:
y[date_block] = item_cnt
X[date_block][2] = sum_price / item_cnt
model = xgb.XGBRegressor(max_depth=4, learning_rate=0.1, n_estimators=32, objective="reg:squarederror")
model.fit(X=X, y=y, eval_metric='rmse')
pre = round(model.predict([[10, 2, X[date_block][2]]])[0], 2)
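# Added remark (an inference, since init_x_y is defined outside this excerpt):
# the feature vector looks like [month, year, average price], so [10, 2, ...]
# would correspond to date_block_num 34, the month being predicted.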
print(" y=", y, " index=", i, " shop_id=", test_sort.iloc[test_idx, 1], " item_id=",
test_sort.iloc[test_idx, 2], "ID=", test_sort.iloc[test_idx, 0], "price=", X[date_block][2],
" pre=", pre)
if pre > 20:
pre = 20
elif pre < 0:
pre = 0
my_submission.iloc[test_sort.iloc[test_idx, 0], 1] = pre
item_cnt = 0
sum_price = 0
date_block = -1
X, y = init_x_y()
test_idx += 1
if test_idx == len(test):
break
my_submission.to_csv(my_submission_path, index=False)
def predictFutureSales():
sales_train_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//sales_train.csv")
items_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//items.csv")
test_path = os.path.join(os.path.dirname("__file__"),
"data//competitive-data-science-predict-future-sales//test.csv")
sales_train = pd.read_csv(sales_train_path)
sales_train = sales_train.drop(
sales_train[sales_train.item_price < 0].index | sales_train[sales_train.item_price >= 100000].index)
sales_train = sales_train.drop(
sales_train[sales_train.item_cnt_day < 0].index | sales_train[sales_train.item_cnt_day >= 1000].index)
sales_train = pd.pivot_table(sales_train, index=['shop_id', 'item_id', 'date_block_num'],
aggfunc={'item_price': np.mean, 'item_cnt_day': np.sum}, fill_value=0).reset_index()
sales_train.insert(3, 'month', sales_train['date_block_num'] % 12)
sales_train.insert(3, 'year', sales_train['date_block_num'] // 12)
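# Note on the two derived features above: date_block_num counts consecutive
# months of the training period, so integer division and modulo split it into
# a year index and a month index. A quick check of the arithmetic with
# hypothetical values:
# date_block_num = 13 -> year = 13 // 12 = 1, month = 13 % 12 = 1
# date_block_num = 23 -> year = 23 // 12 = 1, month = 23 % 12 = 11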
item = pd.read_csv(items_path)
test = pd.read_csv(test_path)
sales_train = pd.merge(sales_train, item.iloc[:, [1, 2]], on=['item_id'], how='left')
sales_train =
|
pd.merge(sales_train, test, on=['shop_id', 'item_id'], how='left')
|
pandas.merge
|
#!/usr/bin/env python
#
# Parses and compiles metrics previously computed by calc_metrics.sh.
#
# Usage: ./compile_metrics.py FOLDER
#
# FOLDER should contain subfolders like lddt, trr_score, etc. This will output
# a file, combined_metrics.csv, in FOLDER.
#
import pandas as pd
import numpy as np
import os, glob, argparse, sys
from collections import OrderedDict
p = argparse.ArgumentParser()
p.add_argument('folder', help='Folder of outputs to process')
p.add_argument('--out', help='Output file name.')
args = p.parse_args()
if args.out is None:
args.out = os.path.join(args.folder,'combined_metrics.csv')
if not os.path.isdir(args.folder):
sys.exit(f'ERROR: Input path {args.folder} not a folder.')
def parse_fastdesign_filters(folder):
files = glob.glob(os.path.join(folder,'*.pdb'))
records = []
for f in files:
row = OrderedDict()
row['name'] = os.path.basename(f)[:-4]
recording = False
with open(f) as inf:
for line in inf:
if recording and len(line)>1:
tokens = line.split()
if len(tokens) == 2:
row[tokens[0]] = float(tokens[1])
if '#END_POSE_ENERGIES_TABLE' in line:
recording=True
if line.startswith('pose'):
row['rosetta_energy'] = float(line.split()[-1])
records.append(row)
if len(records)>0: return pd.DataFrame.from_records(records)
return pd.DataFrame({'name':[]})
def parse_lddt(folder):
data = {'name':[], 'lddt':[]}
files = glob.glob(os.path.join(folder,'*.npz'))
if len(files)==0:
return pd.DataFrame({'name':[]})
for f in files:
prefix = os.path.basename(f).replace('.npz','')
lddt_data = np.load(f)
data['lddt'].append(lddt_data['lddt'].mean())
data['name'].append(prefix)
return pd.DataFrame.from_dict(data)
def parse_rosetta_energy_from_pdb(folder):
files = glob.glob(os.path.join(folder,'*.pdb'))
records = []
for pdbfile in files:
with open(pdbfile) as inf:
name = os.path.basename(pdbfile).replace('.pdb','')
rosetta_energy = np.nan
for line in inf.readlines():
if line.startswith('pose'):
rosetta_energy = float(line.split()[-1])
row = OrderedDict()
row['name'] = name
row['rosetta_energy'] = rosetta_energy
records.append(row)
if len(records)==0: return
|
pd.DataFrame({'name':[]})
|
pandas.DataFrame
|
# Plot candlestick chart
import os
import mplfinance as mpf
import matplotlib
import datetime
import pandas as pd
import numpy as np
def __clean_index(df, valid_index):
# valid_index must be sorted
idx_to_drop = df.index[(df.index < valid_index[0]) | (df.index > valid_index[-1])]
df.drop(idx_to_drop, inplace=True)
def __extend_index(df, target_index):
df_ext = df.reindex(target_index)
# Align timestamps that do not exist in target_index
unmatched = set(df.index) - set(target_index)
for time in unmatched:
i = df_ext.index.get_loc(time, method='ffill') # align to the entry time of the bar
df_ext.iloc[i] = df[time]
# print(f'- align {time} to {df_ext.index[i]}')
return df_ext
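# Usage sketch of the alignment above (hypothetical timestamps, not from real
# data): if target_index holds 1-minute bars 09:30:00, 09:31:00, ... and marks
# contains an entry at 09:30:30, get_loc(..., method='ffill') maps it to the
# 09:30:00 bar, so every marker is drawn on an existing candle.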
def __add_mark_layer(mark_layer, mark_set, direction, mark_size, is_show_marks, bar_data):
if is_show_marks and mark_set is not None:
for color, marks in mark_set.items():
split_color = color.split('-')
if len(split_color) == 2:
color = split_color[0]
size = int(split_color[1])
else:
size = mark_size
__clean_index(marks, bar_data.index)
if not marks.empty:
marks = __extend_index(marks, bar_data.index)
if direction == 'buy':
mark_layer.append(mpf.make_addplot(marks, type='scatter', markersize=size, marker='^', color=color))
else:
mark_layer.append(mpf.make_addplot(marks, type='scatter', markersize=size, marker='v', color=color))
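# Example of the color-key convention handled above (hypothetical key): a
# mark_set entry keyed "red-150" is drawn in red with markersize 150, while a
# plain "blue" key falls back to the mark_size argument.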
def candle_plot(bar_data, title, buy_marks=None, sell_marks=None, mark_size=100, show_marks=True, del_nan=False,
day_gap=False, save_to=None, bar_type='candle'):
"""
Works great for jupyter notebook. For other back ends, change "matplotlib.use()"
:param save_to: /folder/to/save/example.jpg, non-existent folders will be created
:param bar_type: 'candle' or 'line'
"""
bar_data = bar_data.copy()
if del_nan:
bar_data.dropna(subset=['Close'], inplace=True)
if day_gap:
# This works by adding 09:25-09:29 NaN prices, for CN stocks only
dates = set(bar_data.index.date)
indices = []
for date in dates:
indices.extend([datetime.datetime.combine(date, datetime.time(9, i)) for i in range(29, 24, -1)])
gap = pd.DataFrame(np.nan, index=indices, columns=bar_data.columns)
bar_data =
|
pd.concat([bar_data, gap])
|
pandas.concat
|
import rdkit.Chem as Chem
#from rdkit.Chem import Descriptors
import numpy as np
from build_encoding import decode
# =============================================================================
# import rdkit.Chem.Crippen as Crippen
# import rdkit.Chem.rdMolDescriptors as MolDescriptors
# =============================================================================
import subprocess
import pickle
import pandas as pd
from collections import OrderedDict
from time import sleep
import os, shutil
import glob
import math
import xgboost as xgb
#A scaling factor for reward
const = math.exp(3)
# Cache evaluated molecules (rewards are only calculated once)
evaluated_mols = {}
def modify_fragment(f, swap):
f[-(1+swap)] = (f[-(1+swap)] + 1) % 2
return f
# Discard molecules which fulfil all targets (used to remove lead molecules that are already too good).
def clean_good(X, decodings):
X = [X[i] for i in range(X.shape[0]) if not
evaluate_mol(X[i], -1, decodings).all()]
return np.asarray(X)
def get_key(fs):
return tuple([np.sum([(int(x)* 2 ** (len(a) - y))
for x,y in zip(a, range(len(a)))]) if a[0] == 1 \
else 0 for a in fs])
# Get initial distribution of rewards among lead molecules
def get_init_dist(X, decodings):
#arr = np.asarray([evaluate_mol(X[i], -1, decodings) for i in range(X.shape[0])])
arr = np.asarray(bunch_eval(X,-1,decodings))
dist = arr.shape[0] / (1.0 + arr.sum(0)) #sum(0) => sum over all rows for each col
return dist
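# Worked example of the weighting above (hypothetical numbers): with arr of
# shape (100, n_targets), a target already satisfied by 24 of the 100 lead
# molecules gets weight 100 / (1 + 24) = 4.0, while one satisfied by 99 of them
# gets weight 100 / (1 + 99) = 1.0 - rarely met targets are weighted more
# heavily in the initial reward distribution.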
# Function to compute PaDEL descriptors for a folder of molecules and store them in a CSV file
def get_padel(mol_folder_path,file_path,max_time='1500'):
Padel_path = 'C:\\Users\\HP\\PaDEL-Descriptor\\PaDEL-Descriptor.jar'
cmd_list = ['java','-jar',Padel_path, '-dir', mol_folder_path, '-2d','-file', file_path,'-maxruntime', max_time,"-descriptortypes", 'xg_desc3.xml','-usefilenameasmolname']
out = subprocess.Popen(cmd_list,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout,stderr = out.communicate()
stdout = stdout.decode('utf-8')
with open('./Padel.txt','a') as f:
f.write(stdout)
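# Usage sketch (paths are the ones used later in bunch_evaluation; adjust
# Padel_path above to your local PaDEL-Descriptor.jar before running):
# get_padel('./generated_molecules/', './descriptors.csv', max_time='1500')
# The call shells out to the PaDEL jar and appends its stdout to ./Padel.txt.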
def clean_folder(folder_path):
folder = folder_path
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as ex:
print('Failed to delete %s. Reason: %s' % (file_path, ex))
#Bunch evaluation
def bunch_evaluation(mols):
folder_path = "./generated_molecules/"
file_path = "./descriptors.csv"
f = open(file_path, "w+")
f.close()
#Cleaning up the older files
clean_folder(folder_path)
i = 0
SSSR =[]
for mol in mols:
try:
Chem.GetSSSR(mol)
print(Chem.MolToMolBlock((mol)),file=open(str(folder_path)+str(i)+'.mol','w'))
SSSR.append(True)
i = i + 1
except:
SSSR.append(False)
get_padel(folder_path,file_path)
#Reading the descriptors
xg_all = pd.read_csv(file_path)
names = xg_all['Name']
bad = []
with open('./saved_models/good_columns','rb') as f:
cols = pickle.load(f)
for col in xg_all.columns:
if col not in cols:
bad.append(col)
xg_all.drop(columns=bad,inplace=True)
#Verifying that all the required columns are there
assert len(xg_all.columns) == len(cols)
xg_all['Name'] = names
files = xg_all[pd.isnull(xg_all).any(axis=1)]['Name']
xg_all.dropna(inplace=True)
mol= []
if len(files) !=0:
uneval_folder = "C:\\Users\\HP\\AZC_Internship\\DeepFMPO\\3.6\\unevalmol\\"
clean_folder(uneval_folder)
for f in files:
m = Chem.MolFromMolFile(folder_path+str(f)+'.mol')
print(Chem.MolToMolBlock((m)),file=open(str(uneval_folder)+str(f)+'.mol','w'))
get_padel(uneval_folder,'./uneval_desc.csv','-1')
unevalmol =
|
pd.read_csv('./uneval_desc.csv')
|
pandas.read_csv
|
from pprint import pprint
from ipywidgets import Layout, Tab, Button, Label, Box, VBox, HBox, HTML, Output, Dropdown, BoundedIntText, Textarea
from IPython.display import display
import numpy as np
import pandas as pd
from pathlib import Path
import os
import unicodedata
import data.dataframe_preparation as preparation
import data.preprocessing as preprocessing
# from .. import data
# from data import dataframe_preparation as preparation
# from data import preprocessing
def render_text(text, include_line_no=True, normalize_text=True):
"""Renders a large text field with line numbers"""
if normalize_text:
text = ''.join([i if ord(i) < 128 else ' ' for i in text])
# text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
style = '' if not include_line_no else """
<style>
pre { counter-reset: line;}
code { counter-increment: line; }
code:before { display: inline-block; content: counter(line); width: 40px; background-color: #E8E8E8;}
</style>
"""
lines = [str(f"<code>{line}</code>\n") for line in text.splitlines()]
content = f"<pre>\n{''.join(lines)}</pre>"
widget = HTML(value=style+content)
return widget
int_layout = Layout(width="40px")
dropdown_layout = Layout(width="120px")
text_layout = Layout(width="500px")
comment_layout = Layout(width="300px")
cro_options = [('None', ''), ('Physical Risk', 'PR'),
('Transition Risk', 'TR')]
cro_sub_type_options = [('None', ''), ('PR - Accute', 'ACUTE'), ('PR - Chronic', 'CHRON'),
('TR - Policy and Legal', 'POLICY'), ('TR - Technology and Market', 'MARKET'),
('TR - Reputation', 'REPUT'),
]
label_options = [('None', ''), ('Positive', True), ('Negative', False)]
def handle_cro_sub_type_update(event, row, update_labels, cro_type_field):
update_labels('update', row, 'cro_sub_type', event.new)
if event.new:
index = [option[1] for option in cro_sub_type_options].index(event.new)
new_cro_value = cro_sub_type_options[index][0].split(" - ")[0]
cro_type_field.value = new_cro_value
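# Example of the derivation above, using the options defined earlier:
# event.new == 'ACUTE' -> matching label 'PR - Accute' -> split(" - ")[0] == 'PR',
# so picking a sub-type automatically sets the corresponding top-level CRO type.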
def get_line(row, update_labels):
cro_type = Dropdown(value=row.cro if pd.notna(
row.cro) else '', options=cro_options, layout=dropdown_layout)
cro_sub_type = Dropdown(value=row.cro_sub_type if pd.notna(
row.cro_sub_type) else '', options=cro_sub_type_options, layout=dropdown_layout)
page = BoundedIntText(
value=row.page if pd.notna(row.page) else '',
min=0,
max=999,
step=1,
layout=int_layout,
disabled=True
)
paragraph_no = BoundedIntText(
value=row.paragraph_no if pd.notna(row.paragraph_no) else '',
min=0,
max=999,
step=1,
layout=int_layout,
disabled=True
)
label = Dropdown(value=row.label if pd.notna(
row.label) else '', options=label_options, layout=dropdown_layout)
text = Textarea(
value=row.text if
|
pd.notna(row.text)
|
pandas.notna
|
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import pickle
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basic functions helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from src.utils.model_summary_plots import visual_model_summary
# from src.utils.method_comparison_tools import method_comparison_boxplot # copied here for any potential changes,
# Function ..........................................................................
def create_class_colors_dict(*,
list_of_unique_names,
cmap_name="tab20",
cmap_colors_from=0,
cmap_colors_to=1
):
'''Returns a dictionary that maps each class name in list_of_unique_names
to a distinct RGB color
. list_of_unique_names : list with unique, full names of classes, groups etc.
. cmap_name : standard mpl colormap name.
. cmap_colors_from, cmap_colors_to, values between 0 and 1,
used to select range of colors in cmap,
'''
# create cmap
mycmap = plt.cm.get_cmap(cmap_name, len(list_of_unique_names)*10000)
newcolors = mycmap(np.linspace(cmap_colors_from, cmap_colors_to, len(list_of_unique_names)))
class_color_dict = dict()
for i, un in enumerate(list_of_unique_names):
class_color_dict[un] = newcolors[i]
return class_color_dict
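# Usage sketch (hypothetical class names):
# colors = create_class_colors_dict(list_of_unique_names=["melanoma", "nevus", "keratosis"])
# colors["melanoma"] # -> an RGBA color (length-4 array) from the default 'tab20' colormap
# The resulting dictionary can be passed wherever a class -> color mapping is
# expected, e.g. as boxcolor_dict in model_summary_plot below.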
# Function .............................................................................
def load_summary_files(*,
dataset_name,
dataset_variants,
module_names,
ai_methods,
keywords,
path_results,
verbose=False
):
# ensure that you have proper data structures
if isinstance(dataset_variants, str):
dataset_variants = [dataset_variants]
else:
pass
if isinstance(module_names, str):
module_names = [module_names]
else:
pass
if isinstance(ai_methods, str):
ai_methods = [ai_methods]
else:
pass
if isinstance(keywords, str):
keywords = [keywords]
else:
pass
# collect names of files that will be loaded
file_counter=0
for ai_method in ai_methods:
for dataset_variant in dataset_variants:
for module_name in module_names:
if verbose==True:
print("Loading files for: ", ai_method, dataset_variant, module_name, "Found: ", end="")
else:
pass
# path
rpath = os.path.join(path_results, f"{ai_method}__{dataset_name}__{dataset_variant}")
os.chdir(rpath)
# find all files in rpath
files = []
for file in glob.glob("*"):
files.append(file)
# select all with keywords,
files_s = pd.Series(files)
for k in keywords:
files_s = files_s.loc[files_s.str.contains(k)]
files_l = files_s.values.tolist()
# info part 2,
if verbose==True:
print(len(files_s), "files")
else:
pass
# load files
if len(files_s)>0:
for file_name in files_l :
loaded_df = pd.read_csv(file_name)
loaded_df["file_name"]=[file_name]*loaded_df.shape[0]
loaded_df["path"]=[rpath]*loaded_df.shape[0]
if file_counter==0:
summary_df = loaded_df
file_counter += 1
else:
summary_df = pd.concat([summary_df, loaded_df], axis=0)
summary_df.reset_index(inplace=True, drop=True)
else:
pass
# info part 2,
if verbose==True:
print("----> Final Table has results for ", summary_df.shape[0], " models")
else:
pass
return summary_df
# Function .............................................................................
def create_new_df_feature(*, df, new_feature_name, old_features_list, return_full_df=True, verbose=False):
'''
create a new feature by concatenating corresponding cells
in a pd.DataFrame from any number of other selected features
old_features_list: str, or list, with name/s of feature to be concatenated
return_full_df : bool, if True the entire df is returned
if False, return a pd.Series only with the new feature
'''
if isinstance(old_features_list, str):
old_features_list = [old_features_list]
else:
pass
# check if all features are available
stop_the_function = False
for i, feature in enumerate(old_features_list):
try:
df.loc[:, feature]
except:
stop_the_function = True
if verbose==True:
print(f"ERROR: {feature} -- was not found in dataframe")
else:
pass
# concatenate values in each corresponding cell
if stop_the_function==True:
return None
else:
for i, feature in enumerate(old_features_list):
if i==0:
new_feature = df.loc[:, feature].values.tolist()
else:
another_new_feature = df.loc[:, feature].values.tolist()
new_feature = [f"{x}__{y}" for (x,y) in zip(new_feature, another_new_feature)]
if return_full_df==True:
df[new_feature_name] = new_feature
return df
else:
return pd.Series(new_feature)
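# Usage sketch (hypothetical cell values): for a df where "method" == "knn" and
# "method_variant" == "pca" in a given row, the call
# create_new_df_feature(df=df, new_feature_name="full_method_name",
# old_features_list=["method", "method_variant"])
# adds a column with values like "knn__pca" - the same pattern used by
# prepare_summary_df below.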
# Function .............................................................................
def simple_visual_model_summary(*,
model_idx_in_sorted_summary_df=0,
subset_collection_name,
batch_names_list,
summary_df,
class_labels_configs,
path_data,
path_results,
N_displayed_images ="all",
max_img_per_col = 15,
fontsize_scale= 1
):
'''
Temporary function used only with FastClassAI pipeline, that will load raw images
from all batches in a given subset batch collection, eg batch 1 and 2 for test data,
then it will plot 3 figures
- 1st figure - images grouped by class assigned with the model and with color boxes showing true class
- 2nd/3rd figure - pie charts showing sensitivity and specificity
PS: I am working on a better version
'''
# ....
idx_in_df = model_idx_in_sorted_summary_df
# *** find names and models to load
# sort summary df
sorted_summary_df = summary_df.sort_values('model_acc_valid', ascending=False)
sorted_summary_df.reset_index(inplace=True, drop=True)
# get all variables,
method = sorted_summary_df.method.iloc[idx_in_df]
dataset_name = sorted_summary_df.dataset_name.iloc[idx_in_df]
dataset_variant = sorted_summary_df.dataset_variant.iloc[idx_in_df]
model_ID = sorted_summary_df.model_ID.iloc[idx_in_df] # its an ID number given to the model in that dictionary,
# *** paths
path_to_raw_images_sorted_into_batches = os.path.join(path_data, f'{dataset_name}__{dataset_variant}')
path_to_batch_labels = os.path.join(path_data, f'{dataset_name}__{dataset_variant}__extracted_features')
path_to_model_predictions = os.path.join(path_results, f'{method}__{dataset_name}__{dataset_variant}')
# *** load data
# load model predictions,
os.chdir(path_to_model_predictions)
model_predictions_file_name = re.sub("summary_table.csv", "model_predictions_dict.p", sorted_summary_df.file_name.iloc[idx_in_df])
with open(model_predictions_file_name , 'rb') as file:
model_predictions_dict = pickle.load(file)
# get class_label colors,
class_labels_colors_toUse = class_labels_configs[dataset_variant]['class_labels_colors']
# calculate accuracy results
acc_results = f'acc={np.round(sorted_summary_df.loc[:, f"model_acc_{subset_collection_name}"].iloc[idx_in_df],2)}'
# display examples from best performing model,
visual_model_summary(
model_predictions_dict = model_predictions_dict,
model_ID = model_ID, # applicable only with a given model_predictions_dict
# what predictions to display,
n = N_displayed_images, # use "all" to display all
examples_to_plot = "all", # correct and incorrect on the same plot,
class_colors = class_labels_colors_toUse,
# input data,
dataset_name = dataset_name,
subset_name = [subset_collection_name], # name used in xy_names eg: train, valid, test test_2
img_batch_subset_names = batch_names_list, # list, batch names that were placed in that collection,
path_to_raw_img_batch = path_to_raw_images_sorted_into_batches,
# ... settings for main plot,
title_prefix = f"{subset_collection_name}, {acc_results}",
make_plot_with_img_examples = True, # use False to have only pie charts with classification summary
add_proba_values_to_img_name = True,
max_img_per_col = max_img_per_col,
# ... settings for annot. pie charts,
first_pie_title =f"Image Classification Results - True Class in pie chart ring\n{subset_collection_name} data",
second_pie_title =f"Class Detection Results - True Class in pie chart center \n{subset_collection_name} data",
pie_data_for_all_images_in_img_batch=True,
pie_charts_in_ne_row = 7,
# ... pie chart aesthetics added later to tune pie charts
PIE_legend_loc = "upper right",
PIE_ax_title_fonsize_scale=0.6*fontsize_scale,
PIE_legend_fontsize_scale=1.4*fontsize_scale,
PIE_wedges_fontsize_scale=1*fontsize_scale,
PIE_legend_ncol=4,
PIE_tight_lyout=False,
PIE_title_ha="right",
PIE_figsze_scale=1.5,
PIE_subplots_adjust_top=0.75,
PIE_ax_title_fontcolor="black"
)
# Function .............................................................................
def prepare_summary_df(*,
dataset_name,
dataset_variants,
module_names,
ai_methods,
keywords,
path_results,
verbose=False
):
'''
helper function that loads results from model evaluation
for all combinations of dataset_name, dataset_variants, module_names, ai_methods
and keywords that allow finding one or more csv file names (order of keywords is not important)
it will provide only files with an exact match for all keywords; if nothing is returned, set verbose==True
ai_method, dataset_name, and dataset_variants are used to build folder names in path_results
whereas keywords and module names are used to find files
caution: the function load_summary_files requires module names for iteration, but these are not used to find files;
it was an error that will be removed - if you require results for a specific module, place its name in keywords,
and only files created for that module will be loaded
'''
summary_df = load_summary_files(
dataset_name = dataset_name,
dataset_variants = dataset_variants,
module_names = module_names,
ai_methods = ai_methods,
keywords = keywords,
path_results = path_results,
verbose = verbose
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_method_name",
old_features_list = ["method", "method_variant"],
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_dataset_variant",
old_features_list = ["dataset_variant", 'module'],
verbose=True
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_results_group_name",
old_features_list = ["method", "method_variant", "dataset_variant", 'module'],
)
return summary_df
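# Usage sketch (argument values are hypothetical placeholders):
# summary_df = prepare_summary_df(
# dataset_name="skin_cancer",
# dataset_variants=["raw", "augmented"],
# module_names="module_1",
# ai_methods=["knn", "random_forest"],
# keywords=["summary_table.csv"],
# path_results="/path/to/results",
# )
# The returned table carries the added full_method_name, full_dataset_variant
# and full_results_group_name columns used by the plotting helpers below.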
# Function ..............................................................................
def method_comparison_boxplot(*,
title="Accuracy of models created with each method\n",
data, # pd.DataFrame with the results,
figsize=(10,4),
# ...
col_with_results, # df colname with values to display, eg: test_accuracy ...
col_with_group_names, # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID, # df colname with values that will be grouped for separate boxes (must be unique)
col_with_group_colors, # df colname with values that will have different colors (colors can not be mixed within different group_ID)
# ... colors
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=1,
# .. legend
legend__bbox_to_anchor=(0.9, 1.15),
subplots_adjust_top = 0.8,
legend_ncols=4,
# .. baseline
baseline_title="", # "most frequent baseline",
baseline_loc = -0.05,
baseline = 0.25,
top_results = 0.9, # green zone on a plot,
# ... fontsize
title_fontsize=20,
legend_fontsize=10,
xticks_fontsize=10,
yticks_fontsize=15,
axes_labels_fontsize=20,
# ... axies labels
xaxis_label = "Method",
yaxis_label = "Accuracy\n",
paint_xticks=False
):
"""
Nice function to create NGS-like boxplots for comparison of accuracy of different model groups
it is a more generic version of the above function,
"""
# ...............................................
# management
Stop_Function = False
# ...............................................
# data preparation - step.1 extraction
# ...............................................
# - extract unique values that will be searched,
unique_group_ID = data.loc[:,col_with_group_ID].unique().tolist()
unique_group_color_names = data.loc[:,col_with_group_colors].unique().tolist()
# - map colors onto color_groups_names
bx_color_dict = create_class_colors_dict(
list_of_unique_names=unique_group_color_names,
cmap_name=cmap,
cmap_colors_from=cmap_colors_from,
cmap_colors_to=cmap_colors_to
)
# - lists with data for boxes,
'one item for one box in each'
bx_data = []
bx_names = []
bx_colors = []
bx_id = []
bx_colors_dict_key = []
# - separate all boxes, and then find out what is the color and data associated with that box
for one_group_ID in unique_group_ID:
bx_id.append(one_group_ID)
# get the data and other columns for one box
data_df_for_one_box = data.loc[data.loc[:, col_with_group_ID]==one_group_ID,:]
# find out, data, name and color to display
# .... num. data ....
bx_data.append(data_df_for_one_box.loc[:,col_with_results].values) # np.array
# .... labels .....
one_bx_label = data_df_for_one_box.loc[:,col_with_group_names].unique().tolist()
if len(one_bx_label)==1:
bx_names.append(one_bx_label[0]) # np.array
else:
if verbose==1:
print(f"{one_group_ID} contains more then one group to display wiht different names !")
else:
Stop_Function = True
pass
# .... colors ....
one_box_color = data_df_for_one_box.loc[:,col_with_group_colors].map(bx_color_dict).iloc[0]
color_test_values = data_df_for_one_box.loc[:,col_with_group_colors].unique().tolist()
if len(color_test_values)==1:
bx_colors.append(one_box_color) # np.array
bx_colors_dict_key.append(color_test_values[0])
else:
if verbose==1:
print(f"{one_group_ID} contains more then one COLOR to display wiht different names !")
else:
Stop_Function = True
pass
# - check if everythign is in order
if len(bx_colors)!=len(bx_names) and len(bx_names)!=len(bx_data):
if verbose==True:
print("Error: some data are missing or belong to different gorups, and can not be displayed as coherent bocplot")
else:
pass
else:
# ...............................................
# data preparation - step.2 ordering
# ...............................................
# find medians and reorder
bx_medians = list()
for i, d in enumerate(bx_data):
bx_medians.append(np.median(d))
# ...
ordered_data_df = pd.DataFrame({
"bx_data": bx_data,
"bx_medians": bx_medians,
"bx_names": bx_names,
"bx_colors": bx_colors,
"bx_id": bx_id,
"bx_colors_dict_key":bx_colors_dict_key
})
ordered_data_df = ordered_data_df.sort_values("bx_medians", ascending=True)
ordered_data_df = ordered_data_df.reset_index(drop=True)
# ...............................................
# boxplot
# ...............................................
# ...............................................
# boxplot, - plt.boxplot(ordered_bx_data);
fig, ax = plt.subplots(figsize=figsize, facecolor="white")
fig.suptitle(title, fontsize=title_fontsize)
# add boxes,
bx = ax.boxplot(ordered_data_df["bx_data"],
showfliers=True, # show outliers as individual points,
vert=True, # boxes are vertical
labels=ordered_data_df["bx_names"], # x-ticks labels
patch_artist=True,
widths=0.3
)
ax.grid(ls="--")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_xticklabels(ordered_data_df["bx_names"], rotation=45, fontsize=xticks_fontsize, ha="right")
ax.set_yticks([0, .2, .4, .6, .8, 1])
ax.set_yticklabels(["0.0", "0.2", "0.4", "0.6", "0.8", "1.0"], fontsize=yticks_fontsize)
ax.set_ylabel(yaxis_label, fontsize=axes_labels_fontsize)
ax.set_xlabel(xaxis_label, fontsize=axes_labels_fontsize)
ax.set_ylim(0,1.02)
# add colors to each box individually,
for i, j in zip(range(len(bx['boxes'])),range(0, len(bx['caps']), 2)) :
median_color ="black"
box_color = bx_color_dict[ordered_data_df.loc[:,"bx_colors_dict_key"].iloc[i]]
# set properties of items with the same number as boxes,
plt.setp(bx['boxes'][i], color=box_color, facecolor=median_color, linewidth=2, alpha=0.8)
plt.setp(bx["medians"][i], color=median_color, linewidth=2)
plt.setp(bx["fliers"][i], markeredgecolor="black", marker=".") # outliers
# set properties of items with the 2x number of features as boxes,
plt.setp(bx['caps'][j], color=median_color)
plt.setp(bx['caps'][j+1], color=median_color)
plt.setp(bx['whiskers'][j], color=median_color)
plt.setp(bx['whiskers'][j+1], color=median_color)
# ...............................................
# set colors for xtick labels,
if paint_xticks==True:
for i, xtick in enumerate(ax.get_xticklabels()):
xtick.set_color(bx_color_dict[ordered_data_df["bx_colors_dict_key"].iloc[i]])
else:
pass
# ...............................................
# legend,
if ordered_data_df["bx_names"].shape[0]>0:
# create patch for each dataclass, - adapted to even larger number of classes then selected for example images,
patch_list_for_legend =[]
for i, m_name in enumerate(list(bx_color_dict.keys())):
label_text = f"{m_name}"
patch_list_for_legend.append(mpl.patches.Patch(color=bx_color_dict[m_name], label=label_text))
# add patches to plot,
fig.legend(
handles=patch_list_for_legend, frameon=False,
scatterpoints=1, ncol=legend_ncols,
bbox_to_anchor=legend__bbox_to_anchor, fontsize=legend_fontsize)
# ...............................................
# create space for the legend
fig.subplots_adjust(top=subplots_adjust_top)
# ...............................................
# ...............................................
# add line with baseline
ax.axhline(baseline, lw=2, ls="--", color="dimgrey")
ax.text(ordered_data_df.shape[0]+0.4, baseline+baseline_loc, baseline_title, ha="right", color="dimgrey", fontsize=yticks_fontsize)
# ...............................................
# color patches behing boxplots,
patch_width = 1 # i.e. 1 = a grey patch covering 1 box, followed by 1 box without a patch
patch_color = "lightgrey"
pathces_starting_x = list(range(0, ordered_data_df.shape[0], patch_width*2))
# ...
for i, sx in enumerate(pathces_starting_x):
rect = plt.Rectangle((sx+0.5, 0), patch_width, 1000, color=patch_color, alpha=0.2, edgecolor=None)
ax.add_patch(rect)
# color patches for styling the accuracy,
rect = plt.Rectangle((0,0), ordered_data_df.shape[0]*100, baseline, color="red", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
rect = plt.Rectangle((0,baseline), ordered_data_df.shape[0]*100, top_results-baseline, color="orange", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
rect = plt.Rectangle((0, top_results), ordered_data_df.shape[0]*100, 10, color="forestgreen", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
return fig
# Function .............................................................................
def create_boxplot_with_color_classes(*,
summary_df,
figsize = (10,6),
col_with_results ="model_acc_valid", # df colname with values to display, eg: test_accuracy ...
col_with_group_names ="full_method_name", # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID ="full_results_group_name", # df colname with values that will be grouped for separate boxes (must be unique)
col_with_group_colors="full_dataset_variant", # df colname with values that will have different colors (colors can not be mixed within different group_ID)
baseline = 0.5,
fontsize_scale=1,
subplots_adjust_top = 0.6,
baseline_title ="baseline",
legend_ncols=1,
legend__bbox_to_anchor=(0.5, 1.1),
):
'''
Function returns a boxplot showing accuracy or another metric displayed for any number
of groups of results (eg methods), divided into larger groups shown as different colors of boxes,
with a color legend above the plot
summary_df : summary dataframe created with prepare_summary_df function,
col_with_results : str, df colname with values to display, eg: test_accuracy ...
col_with_group_names : str, df colname with values that will be displayed as
names of each box (these do not have to be unique)
col_with_group_ID : str, df colname with values that will be grouped for
separate boxes (must be unique)
col_with_group_colors: str, df colname with values that will have different colors
(colors can not be mixed within different group_ID)
'''
# boxplot
fig = method_comparison_boxplot(
title=f"Accuracy of models created with each method\n\n",
data = summary_df, # pd.DataFrame with the results,
figsize=figsize,
# ...
col_with_results =col_with_results, # df colname with values to display, eg: test_accuracy ...
col_with_group_names =col_with_group_names , # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID =col_with_group_ID, # df colname with values that will be grouped for separate boxes (must be unique)
col_with_group_colors=col_with_group_colors, # df colname with values that will have different colors (colors can not be mixed within different group_ID)
# ... colors
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=0.5,
# .. legend
legend__bbox_to_anchor=(0.5, 1.1),
subplots_adjust_top = subplots_adjust_top,
legend_ncols=legend_ncols,
# .. baseline
baseline_title =baseline_title,
baseline_loc =-0.09,
baseline = baseline,
top_results = 0.9, # green zone on a plot,
# ... fontsize
title_fontsize=20*fontsize_scale,
legend_fontsize=10*fontsize_scale,
xticks_fontsize=10*fontsize_scale,
yticks_fontsize=15*fontsize_scale,
axes_labels_fontsize=15*fontsize_scale,
# ... axies labels
xaxis_label = "Method",
yaxis_label = "Accuracy\n",
paint_xticks=True
)
return fig
# Function .............................................................................
def preapre_table_with_n_best_results_in_each_group(*,
summary_df,
n_top_methods = 1,
sort_by = "model_acc_valid",
feature_used_to_group_models = "full_results_group_name",
display_table=False
):
'''
Function that takes a summary df, selects the max n requested best-performing models per group,
and returns them all in sorted summary df table format,
if display_table==True, displays selected columns from that table to show all examples,
'''
# unique model group names
method_full_name_list = summary_df.loc[:, feature_used_to_group_models].unique().tolist()
# collect top methods,
for i, method_full_name in enumerate(method_full_name_list):
# . subset summary_df
summary_df_subset = summary_df.loc[summary_df.loc[:, feature_used_to_group_models]==method_full_name, :]
summary_df_subset = summary_df_subset.sort_values(sort_by, ascending=False)
# . place in
if i==0:
best_methods_summary_df = summary_df_subset.iloc[0:n_top_methods,:]
else:
best_methods_summary_df = pd.concat([best_methods_summary_df, summary_df_subset.iloc[0:n_top_methods,:]])
best_methods_summary_df.reset_index(drop=True, inplace=True)
# display examples:
# show best model examples
features_to_display = ["dataset_variant", "module","method", "method_variant",
"model_acc_train", "model_acc_valid", "model_acc_test",
"pca_components_used", "run_name"]
sorted_best_methods_summary_df = best_methods_summary_df.sort_values("model_acc_valid", ascending=False)
sorted_best_methods_summary_df.reset_index(drop=True, inplace=True)
if display_table==True:
features_to_display = ["dataset_variant", "module","method", "method_variant",
"model_acc_train", "model_acc_valid", "model_acc_test", "baseline_acc_test",
"pca_components_used", "run_name"]
display(sorted_best_methods_summary_df.loc[:, features_to_display])
else:
pass
return sorted_best_methods_summary_df
# Function .............................................................................
def model_summary_plot(*,
# input data
df,
y,
boxname,
boxcolor,
scatterpoints,
baseline,
# fig, general settings,
title=None ,
figsize=(30,15) ,
# box colors
boxcolor_dict = None,
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=0.5,
# axes
xaxis_label = None,
yaxis_label = None, # if Noene == ydata_colname
grid_dct=dict(lw=1),
# scatterpoints,
full_model_marker ="*",
full_model_markersize=60,
full_model_markercolor="black",
# legend
add_legend=True,
subplots_adjust_top = 0.7,
legend_title=None,
legend__bbox_to_anchor=(0.4, 0.9),
legend_ncols=1,
# baseline
baseline_title = "baseline",
baseline_loc =-0.09,
use_fixed_baselines = True,
baseline_limit_list = [0.5, 0.9, 1.5], # the last one
baseline_color_list = ["red", "orange", "forestgreen"],
# fontsizes
fontsize_scale =1,
title_fontsize =30,
legend_fontsize=20,
xticks_fontsize=20,
yticks_fontsize=20,
axes_labels_fontsize=25,
):
'''
NGS-like boxplot for displaying accuracy, or other results obtained with a large number of models
# input data
df : pd.DataFrame
y : str, or list with values, df colname with values to display, eg: test_accuracy ...
boxname : str, or list with values, df colname with values that will be displayed as names of each box, if None,
(these do not have to be unique, because box colors are also informative,
and you may use shorter names to make the plot nicer, )
boxcolor : str, or list with values, if None, all boxes will have the same colors, and there is no legend displayed,
scatterpoints : list, with True/False values, data points in each group used as scatter points,
not part of the boxplot; if None, none will be made,
baseline : str, or list with values, df colname with values for a baseline that will be displayed in the background,
# horizontal patches
use_fixed_baselines : bool , if True, three horizontal patches of the same height will be added to the plot,
baseline_limit_list : list with 3 floats, eg: [0.5, 0.9, 1.5], each float is the upper limit of the horizontal patch,
starting from the plot bottom
'''
# setup
assert type(df)==pd.DataFrame, "error: df is not pandas DataFrame"
# . set plot x/y labels,
if xaxis_label is None:
if isinstance(boxname, str):
xaxis_label=boxname
else:
xaxis_label="method"
if yaxis_label is None:
if isinstance(y, str):
yaxis_label=y
else:
yaxis_label="y"
# . fontsizes
title_fontsize = title_fontsize*fontsize_scale
legend_fontsize = legend_fontsize*fontsize_scale
xticks_fontsize = xticks_fontsize*fontsize_scale
yticks_fontsize = yticks_fontsize*fontsize_scale
axes_labels_fontsize = axes_labels_fontsize*fontsize_scale
# data preparation
# . extract columns, as list
if isinstance(y , str):
y = df.loc[:, y].values.tolist()
else:
pass
if isinstance(boxname , str):
boxname = df.loc[:, boxname].values.tolist()
else:
pass
#. optional values,
if boxcolor is not None:
if isinstance(boxcolor , str):
boxcolor = df.loc[:, boxcolor].values.tolist()
else:
pass
else:
boxcolor = ["method"]*len(y)
if baseline is not None:
if isinstance(baseline , str):
baseline = df.loc[:, baseline].values.tolist()
else:
pass
else:
baseline = [0.5]*len(y)
if scatterpoints is not None:
if isinstance(scatterpoints , str):
scatterpoints = df.loc[:, scatterpoints].values.tolist()
else:
pass
else:
scatterpoints = [False]*len(y) # ie, No data wil be plotted as scatter point,
# . create unique boxnames with colors and method names,
if boxcolor is not None:
boxname_full = [f"{x}__{y}" for (x,y) in zip (boxname, boxcolor)] # used to search values,
else:
boxname_full = boxname
# assign colors to each boxcolor name
# . define colors for each class in boxcolor
if boxcolor_dict is None:
boxcolor_dict = create_class_colors_dict(
list_of_unique_names = pd.Series(boxcolor).unique().tolist(),
cmap_name = cmap,
cmap_colors_from = cmap_colors_from,
cmap_colors_to = cmap_colors_to
)
else:
pass
# . map colors onto boxcolor, that are names
boxcolor_value = pd.Series(boxcolor).map(boxcolor_dict)
# build pandas df with all data
boxplotdf = pd.DataFrame({
"y": y, # value on y-axis
"boxname_full": boxname_full, # used to separate each box (combines x-axis anme and color)
"boxcolor_value": boxcolor_value, # color for bocplot,
"boxname":boxname, # displayed on x axis,
"boxcolor":boxcolor, # displayed on legend,
"baseline": baseline, # displayed as bacground value,
"scatterpoints": scatterpoints, # it True, the point is plotted as scatterplot,
})
# data preparation - part 2 - prepare array and ncols for plot
# . lists with data for boxes,
'one item for one box in each'
x_axis_name = [] # xtick labels
x_axis_color = [] # xtick label color
bx_x = []
bx_y = []
bx_color = [] # box color, (only for boxes)
sc_y = []
sc_x = []
baseline_x = []
baseline_y = []
median_y = []
# . fill in values, in proper order with positions on x axis,
for i, one_boxname_full in enumerate(pd.Series(boxname_full).unique().tolist()):
# find data for boxes
boxplotdf_bx_subset = boxplotdf.loc[(boxplotdf.boxname_full==one_boxname_full) & (boxplotdf.scatterpoints==False), :]
if boxplotdf_bx_subset.shape[0]>0:
bx_x.append(i)
bx_y.append(boxplotdf_bx_subset.loc[:,"y"].values.tolist())
bx_color.append(boxplotdf_bx_subset.boxcolor_value.iloc[0])
else:
pass
# find data for scatter points,
boxplotdf_sc_subset = boxplotdf.loc[(boxplotdf.boxname_full==one_boxname_full) & (boxplotdf.scatterpoints==True), :]
sc_values = boxplotdf_sc_subset.loc[:,"y"].values.tolist()
if len(sc_values)>0:
sc_x.extend([i]*len(sc_values))
sc_y.extend(sc_values)
else:
pass
# axis_name, baseline,
boxplotdf_group_subset = boxplotdf.loc[boxplotdf.boxname_full==one_boxname_full, :]
baseline_x.append(i)
baseline_y.append(boxplotdf_group_subset.baseline.max())
median_y.append(boxplotdf_group_subset.y.median())
x_axis_name.append(boxplotdf_group_subset.boxname.iloc[0])
x_axis_color.append(boxplotdf_group_subset.boxcolor_value.iloc[0])
# order items on x axis,
# . dict with key == old position, value == new position
'''
I am using a dict, because each item may have a different number of
elements, and they are not in order (ie one category may be missing in sc and present in bx, or vice versa)
that is completely normal !
'''
x_order = dict(zip(pd.Series(median_y).sort_values().index.values.tolist(), list(range(len(median_y)))))
bx_x = pd.Series(bx_x).map(x_order).values.tolist()
sc_x = pd.Series(sc_x).map(x_order).values.tolist()
baseline_x = pd.Series(baseline_x).map(x_order).values.tolist()
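# Worked example of the reordering above (hypothetical medians): with
# median_y == [0.7, 0.5, 0.9], sort_values() gives original positions [1, 0, 2],
# so x_order == {1: 0, 0: 1, 2: 2}; mapping bx_x, sc_x and baseline_x through it
# places the boxes left to right by increasing median.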
# . create ordered xtick labels
tempdf = pd.concat([pd.Series(median_y),
|
pd.Series(x_axis_color)
|
pandas.Series
|
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected =
|
DataFrame({"col": no_name}, index=index, dtype=np.int32)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import seaborn as sns
import json
from bs4 import BeautifulSoup
import mwparserfromhell
import pandas as pd
import re
from urllib.request import urlopen
from urllib.parse import urlencode
API_URL = "https://en.wikipedia.org/w/api.php"
wiki_prefix = "https://en.wikipedia.org"
wiki_gal = 'https://en.wikipedia.org/wiki/Gal_Gadot'
html = urlopen(wiki_gal)
soup = BeautifulSoup(html, features='html.parser')
films_table = soup.find('table', class_='wikitable sortable').findAll('tr')
headers = films_table[0]
rows = films_table[1:]
headers = [header.get_text().strip() for header in headers.find_all('th')]
table = [[cell.get_text().strip() for cell in row.find_all('td')]
for row in rows]
for i, actor in enumerate(table):
if not actor[0].isdigit() and actor[0] != 'TBA':
table[i].insert(0, table[i-1][0])
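# The check above compensates for rowspan cells in the wikitable: when a row's
# first cell is not a year (and not 'TBA'), the year from the previous row is
# prepended, e.g. a hypothetical second row of a 2016 film that starts with its
# title becomes ['2016', '<title>', ...].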
df = pd.DataFrame(data=table, columns=headers)
df = df.drop(['Notes'], axis=1)
|
pd.set_option('display.max_columns', 5)
|
pandas.set_option
|
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser =
|
tm.box_expected(tdi, box)
|
pandas.util.testing.box_expected
|
import pandas as pd
import numpy as np
import click
import h5py
import os
import logging
from array import array
from copy import deepcopy
from tqdm import tqdm
from astropy.io import fits
from fact.credentials import create_factdb_engine
from zfits import FactFits
from scipy.optimize import curve_fit
from joblib import Parallel, delayed
import drs4Calibration.config as config
from drs4Calibration.constants import NRCHID, NRCELL, NRTEMPSENSOR, ROI, ADCCOUNTSTOMILIVOLT
from drs4Calibration.tools import safety_stuff
import matplotlib.pyplot as plt
from time import time
def print_delta_time(time, string=""):
hours = int(time / 3600)
rest = time % 3600
minutes = int(rest / 60)
seconds = round(rest % 60, 2)
print(string+" deltaTime: ", hours, minutes, seconds)
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/drsFitsFiles.txt",
type=click.Path(exists=False))
def search_drs_fits_files(drs_file_list_doc_path: str):
'''
    Search through the FACT database and store the paths of all DRS files
    in the text file given by drs_file_list_doc_path
Args:
drs_file_list_doc_path (str):
Full path to the storeFile
with the extension '.txt'
'''
# TODO check safety stuff. maybe remove
#safety_stuff(drs_file_list_doc_path)
def filename(row):
return os.path.join(
str(row.date.year),
"{:02d}".format(row.date.month),
"{:02d}".format(row.date.day),
"{}_{:03d}.fits.fz".format(row.fNight, row.fRunID),
)
# 40drs4320Bias
drs_infos = pd.read_sql(
"RunInfo",
create_factdb_engine(),
columns=[
"fNight", "fRunID",
"fRunTypeKey", "fDrsStep",
"fNumEvents"])
drs_file_infos = drs_infos.query("fRunTypeKey == 2 &" +
"fDrsStep == 2 &" +
"fNumEvents == 1000").copy()
    # fNumEvents == 1000 guards against unfinished/broken files
drs_file_infos["date"] = pd.to_datetime(drs_file_infos.fNight.astype(str),
format="%Y%m%d")
drs_files = drs_file_infos.apply(filename, axis=1).tolist()
pd.DataFrame(drs_files).to_csv(drs_file_list_doc_path, index=False,
header=False)
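# Hedged usage sketch (the helper name below is illustrative, not part of the
# calibration pipeline): read the file written above back into absolute raw
# paths, mirroring what store_drs_values does further down.
def _example_read_drs_file_list(drs_file_list_doc_path, source_folder_path):
    relative_paths = open(drs_file_list_doc_path).read().splitlines()
    return [os.path.join(source_folder_path, "raw", p) for p in relative_paths]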
@click.command()
@click.argument('drs_file_list_doc_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/selectedDrsFitsFiles.txt",
type=click.Path(exists=True))
@click.argument('store_file_path',
default="/net/big-tank/POOL/" +
"projects/fact/drs4_calibration_data/" +
"calibration/calculation/newBaseline_timeTest.h5",
type=click.Path(exists=False))
@click.argument('source_folder_path',
default="/net/big-tank/POOL/projects/fact/drs4_calibration_data/",
type=click.Path(exists=False))
def store_drs_values(drs_file_list_doc_path, store_file_path, source_folder_path):
with h5py.File(store_file_path, 'w') as hf:
hf.create_dataset(
name="Time", dtype="float32",
shape=(0, 1), maxshape=(None, 1),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="Temperature", dtype="float32",
shape=(0, NRTEMPSENSOR), maxshape=(None, NRTEMPSENSOR),
compression="gzip", compression_opts=9,
fletcher32=True)
hf.create_dataset(
name="NewBaseline", dtype="float32",
shape=(0, NRCHID*NRCELL*ROI), maxshape=(None, NRCHID*NRCELL*ROI),
compression="gzip", compression_opts=9,
fletcher32=True)
class SourceDataSet:
# @resettable
run_begin = pd.to_datetime("")
run_end = pd.to_datetime("")
def __init__(self):
type(self).run_begin = pd.to_datetime("")
type(self).run_end = pd.to_datetime("")
source_data_set = SourceDataSet()
drs_file_list = open(drs_file_list_doc_path).read().splitlines()
for drs_fits_file_path in tqdm(drs_file_list):
        # debug leftover disabled here: it pinned every loop iteration to file 700
        # drs_fits_file_path = drs_file_list[700]  # care!!
date_path_part = drs_fits_file_path.split('_')[0]
drs_fits_file_path = (source_folder_path+"raw/" +
drs_fits_file_path.strip("\n"))
drs_file_path = (drs_fits_file_path.strip("fits.fz") +
".drs.fits.gz")
temp_file_path = (source_folder_path+"aux/" +
date_path_part+".FAD_CONTROL_TEMPERATURE.fits")
if(os.path.isfile(drs_fits_file_path) and os.path.isfile(temp_file_path)):
time_marker1 = time()
with fits.open(drs_file_path,
ignoremissing=True,
ignore_missing_end=True) as drs_table:
source_data_set.run_begin = pd.to_datetime(drs_table[1].header["RUN2-BEG"])
source_data_set.run_end = pd.to_datetime(drs_table[1].header["RUN2-END"])
print(type(source_data_set.run_begin), type(source_data_set.run_end))
time_marker2 = time()
print_delta_time(time_marker2 - time_marker1, "open drs_file_path")
time_marker3 = time()
with fits.open(temp_file_path,
mmap=True,
mode='denywrite',
ignoremissing=True,
ignore_missing_end=True) as table:
table_time = table[1].data["Time"]
table_temperature = table[1].data["temp"]
time_marker4 = time()
print_delta_time(time_marker4 - time_marker3, "open temp_file_path")
print(type(table_time), table_time.shape, type(table_temperature), table_temperature.shape)
time_marker5 = time()
if table_temperature.shape[1] != NRTEMPSENSOR:
temp_filename = temp_file_path.split('/')[-1]
message = (
" File not used: Just "+str(table_temperature.shape[1]) +
" Temperature Values in File '"+temp_filename+"'")
raise Exception(message)
table_datetime =
|
pd.to_datetime(table_time * 24 * 3600 * 1e9)
|
pandas.to_datetime
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: txtclf.py
# Author: <NAME>
# E-mail: <EMAIL>
# Created Time: 2016-07-05 14:39:18
###########################################################################
#
import os, sys, difflib, itertools
from time import time
import numpy as np
import scipy as sp
import scipy.stats as stats
import pandas as pd
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler, LabelBinarizer, label_binarize, normalize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold, KFold, GridSearchCV, RandomizedSearchCV
from sklearn import metrics
from .util import io, func, plot
from .util import math as imath
common_cfg = {}
def init(plot_cfg={}, plot_common={}):
if (len(plot_cfg) > 0 and plot_cfg['MON'] is not None):
plot.MON = plot_cfg['MON']
global common_cfg
if (len(plot_common) > 0):
common_cfg = plot_common
def get_featw(pipeline, feat_num):
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(feat_num)
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = feature_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = feature_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx] = normalize(estm.feature_importances_, norm='l1')
subfeat_w[filt_subfeat_idx] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
		if (hasattr(cmpn, 'get_support')):
			filt_feat_idx = feature_idx[cmpn.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx] = normalize(filt_feat_w, norm='l1')
feat_w[filt_feat_idx] = filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
return feat_w_dict, sub_feat_w
def get_score(pipeline, X_test, mltl=False):
if ((not isinstance(pipeline, Pipeline) and hasattr(pipeline, 'predict_proba')) or(isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline.named_steps['clf'].estimators_[0], 'predict_proba')) or (not isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
return pipeline.predict_proba(X_test)
else:
# return pipeline.predict_proba(X_test)[:, 1]
return pipeline.predict_proba(X_test)
elif (hasattr(pipeline, 'decision_function')):
return pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model!')
		return [0] * X_test.shape[0]
# Benchmark
def benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=False, signed=False, average='micro'):
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
orig_pred = pred = pipeline.predict(X_test)
orig_prob = prob = pipeline.predict_proba(X_test) if hasattr(pipeline, 'predict_proba') else pipeline.decision_function(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
is_mltl = mltl
if (signed):
Y_test = np.column_stack([np.abs(Y_test).reshape((Y_test.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(Y_test).astype('int8').reshape((Y_test.shape[0],-1))).T]) if (len(Y_test.shape) < 2 or Y_test.shape[1] == 1 or np.where(Y_test<0)[0].shape[0]>0) else Y_test
pred = np.column_stack([np.abs(pred).reshape((pred.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(pred).astype('int8').reshape((pred.shape[0],-1))).T]) if (len(pred.shape) < 2 or pred.shape[1] == 1 or np.where(pred<0)[0].shape[0]>0) else pred
is_mltl = True
try:
accuracy = metrics.accuracy_score(Y_test, pred)
except ValueError as e:
print(e)
Y_test, pred = Y_test.ravel(), pred.ravel()
accuracy = metrics.accuracy_score(Y_test, pred)
print('accuracy: %0.3f' % accuracy)
if (is_mltl and average == 'all'):
micro_precision = metrics.precision_score(Y_test, pred, average='micro')
print('micro-precision: %0.3f' % micro_precision)
micro_recall = metrics.recall_score(Y_test, pred, average='micro')
print('micro-recall: %0.3f' % micro_recall)
micro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='micro')
print('micro-fscore: %0.3f' % micro_fscore)
macro_precision = metrics.precision_score(Y_test, pred, average='macro')
print('macro-precision: %0.3f' % macro_precision)
macro_recall = metrics.recall_score(Y_test, pred, average='macro')
print('macro-recall: %0.3f' % macro_recall)
macro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='macro')
print('macro-fscore: %0.3f' % macro_fscore)
else:
precision = metrics.precision_score(Y_test, pred, average=average if is_mltl else 'binary')
print('precision: %0.3f' % precision)
recall = metrics.recall_score(Y_test, pred, average=average if is_mltl else 'binary')
print('recall: %0.3f' % recall)
fscore = metrics.fbeta_score(Y_test, pred, beta=1, average=average if is_mltl else 'binary')
print('fscore: %0.3f' % fscore)
print('classification report:')
# print metrics.classification_report(Y_test, pred)
metric_df = pd.DataFrame(metrics.classification_report(Y_test, pred, output_dict=True)).T[['precision', 'recall', 'f1-score', 'support']]
print(metric_df)
print('confusion matrix:')
if (is_mltl):
pass
else:
print(metrics.confusion_matrix(Y_test, pred))
print('+' * 80)
clf = pipeline.named_steps['clf'] if (type(pipeline) is Pipeline) else pipeline
if ((isinstance(clf, OneVsRestClassifier) and hasattr(clf.estimators_[0], 'predict_proba')) or (not isinstance(clf, OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
scores = pipeline.predict_proba(X_test)
if (type(scores) == list):
scores = np.concatenate([score[:, -1].reshape((-1, 1)) for score in scores], axis=1)
else:
scores = pipeline.predict_proba(X_test)[:, -1]
elif (hasattr(pipeline, 'decision_function')):
scores = pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model! ROC and PRC figures will be invalid.')
scores = [0] * Y_test.shape[0]
if (signed and (len(scores.shape) < 2 or scores.shape[1] < pred.shape[1])):
scores = np.concatenate([np.abs(scores).reshape((scores.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,:2] for lb in (np.sign(scores).astype('int8').reshape((scores.shape[0],-1))).T], axis=1)
if (is_mltl):
if ((len(Y_test.shape) == 1 or Y_test.shape[1] == 1) and len(np.unique(Y_test)) > 2):
lbz = LabelBinarizer()
Y_test = lbz.fit_transform(Y_test)
def micro():
# Micro-average ROC curve
y_true = np.array(Y_test)
s_array = np.array(scores)
if (len(s_array.shape) == 3):
s_array = s_array[:,:,1].reshape((s_array.shape[0],s_array.shape[1],))
if (y_true.shape[0] == s_array.shape[1] and y_true.shape[1] == s_array.shape[0]):
s_array = s_array.T
return metrics.roc_curve(y_true.ravel(), s_array.ravel())
def macro():
# Macro-average ROC curve
n_classes = Y_test.shape[1]
fpr, tpr = [dict() for i in range(2)]
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(Y_test[:, i], scores[:, i])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
return all_fpr, mean_tpr, _
if (average == 'micro'):
roc = micro()
elif (average == 'macro'):
roc = macro()
elif (average == 'all'):
micro_roc = micro()
macro_roc = macro()
if (type(scores) == list):
scores = np.array(scores)[:,:,0]
prc = metrics.precision_recall_curve(Y_test.ravel(), scores.ravel()) # Only micro-prc is supported
else:
roc = metrics.roc_curve(Y_test, scores)
prc = metrics.precision_recall_curve(Y_test, scores)
# print 'ROC:\n%s\n%s' % (roc[0], roc[1])
# print 'PRC:\n%s\n%s' % (prc[0], prc[1])
print('Training and Testing X shape: %s; %s' % (', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_train]) if type(X_train) is list else '(%s)' % ','.join([str(x) for x in X_train.shape]), ', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_test]) if type(X_test) is list else '(%s)' % ','.join([str(x) for x in X_test.shape])))
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(X_train[0].shape[1] if type(X_train) is list else X_train.shape[1])
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = filt_feat_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = filt_feat_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx][:len(estm.feature_importances_)] = normalize(estm.feature_importances_, norm='l1')
subfeat_w[filt_subfeat_idx][:len(filt_subfeat_w)] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx][:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)] = normalize(filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w, norm='l1')
feat_w[filt_feat_idx][:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)] = filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
if (hasattr(cmpn, 'get_support')):
filt_feat_idx = filt_feat_idx[cmpn.get_support()]
print('\n')
if (is_mltl and average == 'all'):
return {'accuracy':accuracy, 'micro-precision':micro_precision, 'micro-recall':micro_recall, 'micro-fscore':micro_fscore, 'macro-precision':macro_precision, 'macro-recall':macro_recall, 'macro-fscore':macro_fscore, 'train_time':train_time, 'test_time':test_time, 'micro-roc':micro_roc, 'macro-roc':macro_roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'metrics':metric_df}
else:
return {'accuracy':accuracy, 'precision':precision, 'recall':recall, 'fscore':fscore, 'train_time':train_time, 'test_time':test_time, 'roc':roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'pred_prob':orig_prob, 'metrics':metric_df}
# Calculate the Venn diagram overlaps
def pred_ovl(preds, pred_true=None, axis=1):
if (axis == 0):
preds = preds.T
if (pred_true is not None):
pred_true = pred_true.reshape((-1,))
	# Each column holds one prediction vector; each row is one sample/position being compared
var_num, dim = preds.shape[0], preds.shape[1]
orig_idx = np.arange(var_num)
if (len(preds.shape) < 2 or preds.shape[1] == 1):
if (pred_true is None):
return np.ones(shape=(1,), dtype='int')
else:
overlap_mt = np.ones(shape=(1,2), dtype='int')
overlap_mt[0,1] = orig_idx[preds.reshape((-1,)) == pred_true].shape[0]
return overlap_mt
	# Enumerate every non-empty subset of the prediction-column indices
subset_idx = list(imath.subset(list(range(dim)), min_crdnl=1))
# Initialize result matrix
if (pred_true is None):
overlap_mt = np.zeros(shape=(len(subset_idx),), dtype='int')
else:
overlap_mt = np.zeros(shape=(len(subset_idx), 2), dtype='int')
# Calculate overlap for each subset
for i, idx in enumerate(subset_idx):
rmn_idx = set(range(dim)) - set(idx)
		# Select the positions where the chosen subset has no overlap with the remaining columns
pred_sum, chsn_sum, rmn_sum = preds.sum(axis=1), preds[:,idx].sum(axis=1), preds[:,list(rmn_idx)].sum(axis=1)
condition = np.all([np.logical_or(chsn_sum == 0, chsn_sum == len(idx)), np.logical_or(rmn_sum == 0, rmn_sum == len(rmn_idx)), np.logical_or(pred_sum == len(idx), pred_sum == len(rmn_idx))], axis=0)
if (pred_true is None):
overlap_mt[i] = orig_idx[condition].shape[0]
else:
# And the selected positions should be true
true_cond = np.logical_and(condition, preds[:,idx[0]] == pred_true)
overlap_mt[i,0] = orig_idx[condition].shape[0]
overlap_mt[i,1] = orig_idx[true_cond].shape[0]
return overlap_mt
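# Hedged usage sketch (the toy arrays are illustrative): stack per-model
# prediction vectors column-wise and pass the ground truth; the result has one
# row per non-empty column subset with the overlap count and, in the second
# column, how many of those positions are also correct.
def _example_pred_ovl():
    toy_preds = np.column_stack([np.array([1, 0, 1, 0, 1]),
                                 np.array([1, 0, 0, 1, 1])])
    toy_truth = np.array([1, 0, 1, 1, 0])
    return pred_ovl(toy_preds, toy_truth)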
def save_featw(features, crsval_featw, crsval_subfeatw, cfg_param={}, lbid=''):
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
for k, v in crsval_featw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
feat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
feat_w_mt = mms.fit_transform(feat_w_mt)
feat_w_avg = feat_w_mt.mean(axis=1)
feat_w_std = feat_w_mt.std(axis=1)
sorted_idx = np.argsort(feat_w_avg, axis=-1)[::-1]
# sorted_idx = sorted(range(feat_w_avg.shape[0]), key=lambda k: feat_w_avg[k])[::-1]
sorted_feat_w = np.column_stack((features[sorted_idx], feat_w_avg[sorted_idx], feat_w_std[sorted_idx]))
feat_w_df = pd.DataFrame(sorted_feat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_featw', False)):
feat_w_df.to_excel('featw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_featw_npz', False)):
io.write_df(feat_w_df, 'featw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_featw', False)):
plot.plot_bar(feat_w_avg[sorted_idx[:10]].reshape((1,-1)), feat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_featw%s_%s' % (lbidstr, measure_str), plot_cfg=common_cfg)
for k, v in crsval_subfeatw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
subfeat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
subfeat_w_mt = mms.fit_transform(subfeat_w_mt)
subfeat_w_avg = subfeat_w_mt.mean(axis=1)
subfeat_w_std = subfeat_w_mt.std(axis=1)
sorted_idx = np.argsort(subfeat_w_avg, axis=-1)[::-1]
sorted_subfeat_w = np.column_stack((features[sorted_idx], subfeat_w_avg[sorted_idx], subfeat_w_std[sorted_idx]))
subfeat_w_df = pd.DataFrame(sorted_subfeat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_subfeatw', False)):
subfeat_w_df.to_excel('subfeatw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_subfeatw_npz', False)):
io.write_df(subfeat_w_df, 'subfeatw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_subfeatw', False)):
plot.plot_bar(subfeat_w_avg[sorted_idx[:10]].reshape((1,-1)), subfeat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_subfeatw_%s' % measure_str, plot_cfg=common_cfg)
# Classification
def classification(X_train, Y_train, X_test, model_iter, model_param={}, cfg_param={}, global_param={}, lbid=''):
	print('Classifying...')
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
to_hdf, hdf5_fpath = cfg_param.setdefault('to_hdf', False), '%s' % 'crsval_dataset.h5' if cfg_param.setdefault('hdf5_fpath', 'crsval_dataset.h5') is None else cfg_param['hdf5_fpath']
# Format the data
if (type(X_train) == list):
assert all([len(x) == len(X_train[0]) for x in X_train[1:]])
X_train = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_train]
X_train = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_train]
else:
if (type(X_train) != pd.io.parsers.TextFileReader and type(X_train) != pd.DataFrame):
X_train = pd.DataFrame(X_train)
X_train = pd.concat(X_train) if (type(X_train) == pd.io.parsers.TextFileReader and not to_hdf) else X_train
if (type(X_test) == list):
assert all([len(x) == len(X_test[0]) for x in X_test[1:]])
X_test = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_test]
X_test = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_test]
else:
if (type(X_test) != pd.io.parsers.TextFileReader and type(X_test) != pd.DataFrame):
X_test = pd.DataFrame(X_test)
X_test = pd.concat(X_test) if (type(X_test) == pd.io.parsers.TextFileReader and not to_hdf) else X_test
if (type(Y_train) != pd.io.parsers.TextFileReader and type(Y_train) != pd.DataFrame):
Y_train = pd.DataFrame(Y_train)
Y_train_mt = Y_train.values.reshape((Y_train.shape[0],)) if (len(Y_train.shape) == 1 or Y_train.shape[1] == 1) else Y_train.values
mltl=True if len(Y_train_mt.shape) > 1 and Y_train_mt.shape[1] > 1 or 2 in Y_train_mt else False
print('Classification is starting...')
preds, probs, scores = [[] for i in range(3)]
crsval_featw, crsval_subfeatw = [{} for i in range(2)]
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl if (type(mdl) is Pipeline) else Pipeline([('clf', mdl)])
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Build the model
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train_mt)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
pred = pipeline.predict(X_test)
prob = pipeline.predict_proba(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
preds.append(pred)
probs.append(prob)
scores.append(get_score(pipeline, X_test, mltl))
# Save predictions and model
if (cfg_param.setdefault('save_pred', True)):
io.write_npz(dict(pred_lb=pred, pred_prob=prob), 'clf_pred_%s%s' % (model_name.replace(' ', '_').lower(), lbidstr))
if (cfg_param.setdefault('save_model', True)):
mdl_name = '%s' % model_name.replace(' ', '_').lower()
if (all([hasattr(pipeline.steps[i][1], 'save') for i in range(len(pipeline.steps))])):
for sub_mdl_name, mdl in pipeline.steps:
mdl.save('%s_%s%s' % (mdl_name, sub_mdl_name.replace(' ', '_').lower(), lbidstr), **global_param.setdefault('mdl_save_kwargs', {}))
else:
io.write_obj(pipeline, '%s%s' % (mdl_name, lbidstr))
# Feature importances
feat_w, sub_feat_w = get_featw(pipeline, X_train[0].shape[1] if (type(X_train) is list) else X_train.shape[1])
for k, v in feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_featw.setdefault(key, []).append(v)
for k, v in sub_feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_subfeatw.setdefault(key, []).append(v)
print('\n')
if (len(preds) > 1):
# Prediction overlap
preds_mt = np.column_stack([x.ravel() for x in preds])
povl = np.array(pred_ovl(preds_mt))
# Spearman's rank correlation
spmnr, spmnr_pval = stats.spearmanr(preds_mt)
# Kendall rank correlation
# kendalltau = stats.kendalltau(preds_mt)[0]
# Pearson correlation
		# pearson = stats.pearsonr(preds_mt)[0]
## Save performance data
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_df =
|
pd.DataFrame(povl, index=povl_idx, columns=['pred_ovl'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import os
import sys
from subprocess import call
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
import scipy
import json
from sklearn.decomposition import PCA as skPCA
from scipy.spatial import distance
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import dendrogram, fcluster
import seaborn as sns
from matplotlib.colors import rgb2hex, colorConverter
from pprint import pprint
import difflib
from operator import itemgetter
import itertools
from functools import reduce
import matplotlib.ticker as ticker
import math
import matplotlib.patches as patches
from collections import defaultdict
import collections
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
import matplotlib.mlab as mlab
def make_new_matrix_gene(org_matrix_by_gene, gene_list_source, exclude_list=""):
if isinstance(gene_list_source,str):
gene_df = pd.read_csv(open(gene_list_source,'rU'), sep=None, engine='python')
try:
gene_list = list(set(gene_df['GeneID'].tolist()))
if exclude_list != "":
gene_list = [g for g in gene_list if g not in exclude_list]
except KeyError:
sys.exit("Error: Please provide Gene list file with 'GeneID' as header.")
try:
group_list = gene_df['GroupID'].tolist()
except KeyError:
sys.exit("Error: Please provide Gene list file with 'GroupID' as header.")
try:
gmatrix_df = org_matrix_by_gene[gene_list]
except KeyError as error_gene:
cause1 = error_gene.args[0].strip(' not in index')
cause = [v.strip('\n\' ') for v in cause1.strip('[]').split(' ')]
absent_gene = cause
print(' '.join(absent_gene)+' not in matrix file.')
new_list = [x for x in gene_list if x not in absent_gene]
gmatrix_df = org_matrix_by_gene[new_list]
cmatrix_df = gmatrix_df.transpose()
cell_list1 = cmatrix_df.columns.values
new_cmatrix_df = cmatrix_df[cell_list1]
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
elif isinstance(gene_list_source,list):
if exclude_list == "":
gene_list = gene_list_source
else:
gene_list = [g for g in gene_list_source if g not in exclude_list]
try:
gmatrix_df = org_matrix_by_gene[gene_list]
except KeyError as error_gene:
cause = error_gene.args[0]
absent_gene = cause.split('\'')[1]
print(absent_gene+' not in matrix file.')
new_list = [x for x in gene_list if x not in [absent_gene]]
gmatrix_df = org_matrix_by_gene[new_list]
cmatrix_df = gmatrix_df.transpose()
cell_list1 = cmatrix_df.columns.values
new_cmatrix_df = cmatrix_df[cell_list1]
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
else:
sys.exit("Error: gene list must be filepath or a list.")
def make_new_matrix_cell(org_matrix_by_cell, cell_list_file):
cell_df = pd.read_csv(open(cell_list_file,'rU'), sep=None, engine='python')
cell_list_new = list(set([cell.strip('\n') for cell in cell_df['SampleID'].tolist()]))
cell_list_old = org_matrix_by_cell.columns.tolist()
overlap = [c for c in cell_list_new if c in cell_list_old]
not_in_matrix = [c for c in cell_list_new if c not in cell_list_old]
if not_in_matrix != []:
print('These cells were in the cell list provided, but not found in the matrix provided:')
print(not_in_matrix)
new_cmatrix_df = org_matrix_by_cell[overlap]
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
def threshold_genes(by_gene, number_expressed=1, gene_express_cutoff=1.0):
by_gene.apply(lambda column: (column >= 1).sum())
return
def find_top_common_genes(log2_df_by_cell, num_common=25):
top_common_list = []
count = 0
done = False
log2_df_by_gene = log2_df_by_cell.transpose()
log2_df2_gene = log2_df_by_gene.apply(pd.to_numeric,errors='coerce')
log_mean = log2_df2_gene.mean(axis=0).sort_values(ascending=False)
try:
log2_sorted_gene = log2_df_by_gene.reindex_axis(log2_df_by_gene.mean(axis=0).sort_values(ascending=False).index, axis=1)
except ValueError:
overlap_list = [item for item, count in collections.Counter(log2_df_by_cell.index).items() if count > 1]
print(overlap_list, len(overlap_list))
sys.exit('Error: Duplicate GeneIDs are present.')
for gene in log2_sorted_gene.columns.tolist():
if sum(genes < 1 for genes in log2_df_by_gene[gene])<6:
if count < num_common:
count+=1
top_common_list.append(gene)
if count == num_common:
done = True
break
if done:
return log2_df_by_gene[top_common_list].transpose()
else:
return [0]
def log2_oulierfilter(df_by_cell, plot=False, already_log2=False):
if not already_log2:
log2_df = np.log2(df_by_cell+1)
else:
log2_df = df_by_cell
top_log2 = find_top_common_genes(log2_df)
if all(top_log2) != 0:
log2_df2= log2_df.apply(pd.to_numeric,errors='coerce')
log_mean = top_log2.mean(axis=0).sort_values(ascending=False)
log2_sorted = top_log2.reindex_axis(top_log2.mean(axis=0).sort_values(ascending=False).index, axis=1)
xticks = []
keep_col= []
log2_cutoff = np.average(np.average(log2_sorted))-2*np.average(np.std(log2_sorted))
for col, m in zip(log2_sorted.columns.tolist(),log2_sorted.mean()):
if m > log2_cutoff:
keep_col.append(col)
xticks.append(col+' '+str("%.2f" % m))
excluded_cells = [x for x in log2_sorted.columns.tolist() if x not in keep_col]
filtered_df_by_cell = df_by_cell[keep_col]
filtered_df_by_gene = filtered_df_by_cell.transpose()
if not already_log2:
filtered_log2 = np.log2(filtered_df_by_cell[filtered_df_by_cell>0])
else:
filtered_log2 = filtered_df_by_cell[filtered_df_by_cell>0]
if plot:
ax = sns.boxplot(data=filtered_log2, whis= .75, notch=True)
ax = sns.stripplot(x=filtered_log2.columns.values, y=filtered_log2.mean(axis=0), size=4, jitter=True, edgecolor="gray")
xtickNames = plt.setp(ax, xticklabels=xticks)
plt.setp(xtickNames, rotation=90, fontsize=9)
plt.show()
plt.clf()
sns.distplot(filtered_log2.mean())
plt.show()
if not already_log2:
log2_expdf_cell = np.log2(filtered_df_by_cell+1)
else:
log2_expdf_cell = filtered_df_by_cell
log2_expdf_gene = log2_expdf_cell.transpose()
return log2_expdf_cell, log2_expdf_gene
else:
print("no common genes found")
return log2_df, log2_df.transpose()
def augmented_dendrogram(*args, **kwargs):
plt.clf()
ddata = dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
for i, d in zip(ddata['icoord'], ddata['dcoord'], ):
x = 0.5 * sum(i[1:3])
y = d[1]
if y >= 200000:
plt.plot(x, y, 'ro')
plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
textcoords='offset points',
va='top', ha='center')
plt.show()
plt.savefig(os.path.join(new_file,'augmented_dendrogram.png'))
def cluster_indices(cluster_assignments):
n = cluster_assignments.max()
indices = []
for cluster_number in range(1, n + 1):
indices.append(np.where(cluster_assignments == cluster_number)[0])
return indices
def clust_members(r_link, cutoff):
clust = fcluster(r_link,cutoff)
num_clusters = clust.max()
indices = cluster_indices(clust)
return num_clusters, indices
def print_clust_membs(indices, cell_list):
for k, ind in enumerate(indices):
print("cluster", k + 1, "is", [cell_list[x] for x in ind])
def plot_tree(dendr, path_filename, pos=None, save=False):
icoord = scipy.array(dendr['icoord'])
dcoord = scipy.array(dendr['dcoord'])
color_list = scipy.array(dendr['color_list'])
xmin, xmax = icoord.min(), icoord.max()
ymin, ymax = dcoord.min(), dcoord.max()
if pos:
icoord = icoord[pos]
dcoord = dcoord[pos]
for xs, ys, color in zip(icoord, dcoord, color_list):
plt.plot(xs, ys, color)
plt.xlim(xmin-10, xmax + 0.1*abs(xmax))
plt.ylim(ymin, ymax + 0.1*abs(ymax))
if save:
plt.savefig(os.path.join(path_filename,'plot_dendrogram.png'))
plt.show()
# Create a nested dictionary from the ClusterNode's returned by SciPy
def add_node(node, parent):
# First create the new node and append it to its parent's children
newNode = dict( node_id=node.id, children=[] )
parent["children"].append( newNode )
# Recursively add the current node's children
if node.left: add_node( node.left, newNode )
if node.right: add_node( node.right, newNode )
cc = []
# Label each node with the names of each leaf in its subtree
def label_tree(n, id2name):
# If the node is a leaf, then we have its name
if len(n["children"]) == 0:
leafNames = [ id2name[n["node_id"]] ]
# If not, flatten all the leaves in the node's subtree
else:
leafNames = reduce(lambda ls, c: ls + label_tree(c,id2name), n["children"], [])
cc.append((len(leafNames), [x.strip('\n') for x in leafNames]))
cc.sort(key=lambda tup: tup[0], reverse = True)
# Delete the node id since we don't need it anymore and
# it makes for cleaner JSON
del n["node_id"]
# Labeling convention: "-"-separated leaf names
n["name"] = name = "-".join(sorted(map(str, leafNames)))
return leafNames
# Makes a labeled JSON tree for visualization in d3; builds and returns the cc object via label_tree
def make_tree_json(row_clusters, df_by_gene, path_filename):
T= hierarchy.to_tree(row_clusters)
# Create dictionary for labeling nodes by their IDs
labels = list(df_by_gene.index)
id2name = dict(zip(range(len(labels)), labels))
# Initialize nested dictionary for d3, then recursively iterate through tree
d3Dendro = dict(children=[], name="Root1")
add_node( T, d3Dendro )
label_tree( d3Dendro["children"][0], id2name )
# Output to JSON
json.dump(d3Dendro, open(os.path.join(path_filename,"d3-dendrogram.json"), "w"), sort_keys=True, indent=4)
return cc
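# Hedged usage sketch ('ward' linkage is an illustrative choice, not a setting
# this module requires): cluster the cells of a cells-x-genes log2 matrix,
# write d3-dendrogram.json into path_filename, and get back the size-sorted
# (leaf_count, leaf_names) tuples that find_twobytwo below consumes.
def _example_make_tree_json(log2_df_by_gene, path_filename):
    row_clusters = hierarchy.linkage(log2_df_by_gene.values, method='ward')
    return make_tree_json(row_clusters, log2_df_by_gene, path_filename)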
#finds significant genes between subclusters
def find_twobytwo(cc, df_by_cell, full_by_cell_df, path_filename, cluster_size=20):
gene_list = full_by_cell_df.index.tolist()
by_gene_df = full_by_cell_df.transpose()
pair_dict = {}
parent = cc[0][1]
p_num = cc[0][0]
l_nums = [x[0] for x in cc]
c_lists = [c[1] for c in cc[1:]]
unique_count = 1
pair_list = []
for i, c in enumerate(c_lists):
for i2, c2 in enumerate(c_lists):
overlap = [i for i in c if i in c2]
if not overlap and len(c)>=cluster_size and len(c2)>=cluster_size:
if (c,c2) not in pair_list:
pair_list.append((c,c2))
pair_list.append((c2,c))
pair_dict[str(len(c))+'cells_vs_'+str(len(c2))+'cells'+str(unique_count)]= [c, c2]
unique_count+=1
for v, k in pair_dict.items():
g_pvalue_dict = {}
index_list = []
sig_gene_list = []
cell_list1 = [x.strip('\n') for x in k[0]]
cell_list2 = [xx.strip('\n') for xx in k[1]]
group1 = str(len(cell_list1))
group2 = str(len(cell_list2))
df_by_cell_1 = full_by_cell_df[cell_list1]
df_by_cell_2 = full_by_cell_df[cell_list2]
df_by_gene_1 = df_by_cell_1.transpose()
df_by_gene_2 = df_by_cell_2.transpose()
for g in gene_list:
g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g])
if g_pvalue[0] > 0 and g_pvalue[1] <= 1:
g_pvalue_dict[g] = g_pvalue
if g not in [s[0] for s in sig_gene_list]:
sig_gene_list.append([g, g_pvalue[1]])
sig_gene_list.sort(key=lambda tup: tup[1])
pvalues = [p[1] for p in sig_gene_list]
gene_index = [ge[0] for ge in sig_gene_list]
mean_log2_exp_list = []
sig_1_2_list = []
mean1_list = []
mean2_list = []
for sig_gene in gene_index:
sig_gene_df = by_gene_df[sig_gene]
mean_log2_exp_list.append(sig_gene_df.mean())
sig_cell_df = sig_gene_df.transpose()
mean_cell1 = sig_cell_df[cell_list1].mean()
mean1_list.append(mean_cell1)
mean_cell2 = sig_cell_df[cell_list2].mean()
mean2_list.append(mean_cell2)
ratio_1_2 = (mean_cell1+1)/(mean_cell2+1)
sig_1_2_list.append(ratio_1_2)
sig_df = pd.DataFrame({'pvalues':pvalues,'mean_all':mean_log2_exp_list,'mean_group1':mean1_list, 'mean_group2':mean2_list, 'ratio_1_2':sig_1_2_list}, index=gene_index)
cell_names_df = pd.DataFrame({'cells1':pd.Series(cell_list1, index=range(len(cell_list1))), 'cells2':pd.Series(cell_list2, index=range(len(cell_list2)))})
sig_df.to_csv(os.path.join(path_filename,'sig_'+v+'_pvalues.txt'), sep = '\t')
cell_names_df.to_csv(os.path.join(path_filename,'sig_'+v+'_cells.txt'), sep = '\t')
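# Hedged usage sketch (argument names are illustrative): pair the cc tuples
# from make_tree_json with the full genes-x-cells log2 matrix to write per-pair
# one-way ANOVA tables; a smaller cluster_size admits smaller subcluster pairs.
def _example_find_twobytwo(cc, log2_df_by_cell, path_filename):
    find_twobytwo(cc, log2_df_by_cell, log2_df_by_cell, path_filename,
                  cluster_size=10)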
def ellip_enclose(points, color, inc=1, lw=2, nst=2):
"""
Plot the minimum ellipse around a set of points.
Based on:
https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
x = points[:,0]
y = points[:,1]
cov = np.cov(x, y)
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nst * np.sqrt(vals)
center = np.mean(points, 0)
ell = patches.Ellipse(center, width=inc*w, height=inc*h, angle=theta,
facecolor=color, alpha=0.2, lw=0)
edge = patches.Ellipse(center, width=inc*w, height=inc*h, angle=theta,
facecolor='none', edgecolor=color, lw=lw)
return ell, edge
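# Hedged usage sketch (axis and point-cloud names are illustrative): add the
# two-sigma covariance ellipse around a 2-D point cloud to an existing axis,
# the same way the PCA/SVD/t-SNE plots below use it per colour group.
def _example_ellip_enclose(ax, points_2d, color='b'):
    ell, edge = ellip_enclose(np.asarray(points_2d), color)
    ax.add_artist(ell)
    ax.add_artist(edge)
    return ax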
def return_top_pca_gene(df_by_gene, num_genes=100):
gene_pca = skPCA(n_components=3)
np_by_gene = np.asarray(df_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=df_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(df_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
new_gene_matrix = df_by_gene[top_pca_list[0:num_genes]]
return new_gene_matrix, top_pca_list[0:num_genes]
def plot_PCA(args, df_by_gene, path_filename, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, gene_map = False):
gene_list = df_by_gene.columns.tolist()
sns.set_palette("RdBu_r", 10, 1)
if gene_list_filter:
sig_by_gene = df_by_gene[gene_list_filter]
sig_by_cell = sig_by_gene.transpose()
else:
sig_by_gene = df_by_gene
sig_by_cell = sig_by_gene.transpose()
gene_pca = skPCA(n_components=3)
np_by_gene = np.asarray(sig_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
top_by_gene = df_by_gene[top_pca_list[0:num_genes]]
gene_top = skPCA(n_components=2)
cell_pca = skPCA(n_components=2)
top_by_cell = top_by_gene.transpose()
np_top_gene = np.asarray(top_by_cell)
np_top_cell = np.asarray(top_by_gene)
top_cell_trans = cell_pca.fit_transform(np_top_cell)
top_gene_trans = gene_top.fit_transform(np_top_gene)
if not np.isnan(top_cell_trans).any():
fig, (ax_cell, ax_gene) = plt.subplots(2, 1, figsize=(15, 30), sharex=False)
rect_cell = ax_cell.patch
rect_gene = ax_gene.patch
rect_cell.set_facecolor('white')
rect_gene.set_facecolor('white')
ax_cell.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
ax_gene.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
if label_map:
annotate = args.annotate_cell_pca
X = [x for x in top_cell_trans[:, 0]]
Y = [y for y in top_cell_trans[:, 1]]
labels = [label_map[cell][2] for cell in top_by_cell.columns.tolist()]
markers = [label_map[cell][1] for cell in top_by_cell.columns.tolist()]
colors = [label_map[cell][0] for cell in top_by_cell.columns.tolist()]
label_done = []
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
if l in label_done:
lab = ''
else:
lab= l
label_done.append(l)
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_cell.scatter(X_pos, Y_pos, marker=m, c=color, label=lab, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_cell.add_artist(ell)
ax_cell.add_artist(edge)
else:
ax_cell.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], alpha=0.75)
annotate = args.annotate_cell_pca
ax_cell.set_xlim([min(top_cell_trans[:, 0])-1, max(top_cell_trans[:, 0]+1)])
ax_cell.set_ylim([min(top_cell_trans[:, 1])-1, max(top_cell_trans[:, 1]+2)])
ax_cell.set_title(title+'_cell')
if label_map:
handles, labs = ax_cell.get_legend_handles_labels()
# sort both labels and handles by labels
labs, handles = zip(*sorted(zip(labs, handles), key=lambda t: t[0]))
ax_cell.legend(handles, labs, loc='best', ncol=1, prop={'size':12}, markerscale=1.5, frameon=True)
ax_cell.set_xlabel('PC1')
ax_cell.set_ylabel('PC2')
if annotate:
for label, x, y in zip(top_by_cell.columns, top_cell_trans[:, 0], top_cell_trans[:, 1]):
ax_cell.annotate(label, (x+0.1, y+0.1))
if gene_map:
X = [x for x in top_gene_trans[:, 0]]
Y = [y for y in top_gene_trans[:, 1]]
labels = top_by_gene.columns.tolist()
markers = [gene_map[gene][1] for gene in top_by_gene.columns.tolist()]
colors = [gene_map[gene][0] for gene in top_by_gene.columns.tolist()]
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_gene.scatter(X_pos, Y_pos, marker=m, c=color, label = l, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_gene.add_artist(ell)
ax_gene.add_artist(edge)
else:
ax_gene.scatter(top_gene_trans[:, 0], top_gene_trans[:, 1], alpha=0.75)
ax_gene.set_xlim([min(top_gene_trans[:, 0])-1, max(top_gene_trans[:, 0])+1])
ax_gene.set_ylim([min(top_gene_trans[:, 1])-1, max(top_gene_trans[:, 1])+2])
ax_gene.set_title(title+'_gene')
ax_gene.set_xlabel('PC1')
ax_gene.set_ylabel('PC2')
if args.annotate_gene_subset:
plot_subset_path = os.path.join(os.path.dirname(args.filepath),args.annotate_gene_subset)
genes_plot = pd.read_csv(plot_subset_path, sep='\t', index_col=False)
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if label in genes_plot['GeneID'].tolist():
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
else:
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
if plot:
plt.show()
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
plt.savefig(os.path.join(path_filename,save_name+'_skpca.pdf'), bbox_inches='tight')
else:
plt.savefig(os.path.join(path_filename,'Group0_skpca.pdf'), bbox_inches='tight')
plt.close('all')
return top_pca_list
else:
return []
def plot_SVD(args,df_by_gene, path_filename, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, gene_map = False):
gene_list = df_by_gene.columns.tolist()
sns.set_palette("RdBu_r", 10, 1)
if gene_list_filter:
sig_by_gene = df_by_gene[gene_list_filter]
sig_by_cell = sig_by_gene.transpose()
else:
sig_by_gene = df_by_gene
sig_by_cell = sig_by_gene.transpose()
gene_pca = TruncatedSVD(n_components=3)
np_by_gene = np.asarray(sig_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
top_by_gene = df_by_gene[top_pca_list[0:num_genes]]
gene_top = TruncatedSVD(n_components=2)
cell_pca = TruncatedSVD(n_components=2)
top_by_cell = top_by_gene.transpose()
np_top_gene = np.asarray(top_by_cell)
np_top_cell = np.asarray(top_by_gene)
top_cell_trans = cell_pca.fit_transform(np_top_cell)
top_gene_trans = gene_top.fit_transform(np_top_gene)
if not np.isnan(top_cell_trans).any():
fig, (ax_cell, ax_gene) = plt.subplots(2, 1, figsize=(15, 30), sharex=False)
rect_cell = ax_cell.patch
rect_gene = ax_gene.patch
rect_cell.set_facecolor('white')
rect_gene.set_facecolor('white')
ax_cell.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
ax_gene.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
if label_map:
annotate = args.annotate_cell_pca
X = [x for x in top_cell_trans[:, 0]]
Y = [y for y in top_cell_trans[:, 1]]
labels = [label_map[cell][2] for cell in top_by_cell.columns.tolist()]
markers = [label_map[cell][1] for cell in top_by_cell.columns.tolist()]
colors = [label_map[cell][0] for cell in top_by_cell.columns.tolist()]
label_done = []
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
if l in label_done:
lab = ''
else:
lab= l
label_done.append(l)
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_cell.scatter(X_pos, Y_pos, marker=m, c=color, label=lab, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_cell.add_artist(ell)
ax_cell.add_artist(edge)
else:
ax_cell.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], alpha=0.75)
annotate = args.annotate_cell_pca
ax_cell.set_xlim([min(top_cell_trans[:, 0])-1, max(top_cell_trans[:, 0]+1)])
ax_cell.set_ylim([min(top_cell_trans[:, 1])-1, max(top_cell_trans[:, 1]+2)])
ax_cell.set_title(title+'_cell')
if label_map:
handles, labs = ax_cell.get_legend_handles_labels()
# sort both labels and handles by labels
labs, handles = zip(*sorted(zip(labs, handles), key=lambda t: t[0]))
ax_cell.legend(handles, labs, loc='best', ncol=1, prop={'size':12}, markerscale=1.5, frameon=True)
ax_cell.set_xlabel('PC1')
ax_cell.set_ylabel('PC2')
if annotate:
for label, x, y in zip(top_by_cell.columns, top_cell_trans[:, 0], top_cell_trans[:, 1]):
ax_cell.annotate(label, (x+0.1, y+0.1))
if gene_map:
X = [x for x in top_gene_trans[:, 0]]
Y = [y for y in top_gene_trans[:, 1]]
labels = top_by_gene.columns.tolist()
markers = [gene_map[gene][1] for gene in top_by_gene.columns.tolist()]
colors = [gene_map[gene][0] for gene in top_by_gene.columns.tolist()]
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_gene.scatter(X_pos, Y_pos, marker=m, c=color, label = l, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_gene.add_artist(ell)
ax_gene.add_artist(edge)
else:
ax_gene.scatter(top_gene_trans[:, 0], top_gene_trans[:, 1], alpha=0.75)
ax_gene.set_xlim([min(top_gene_trans[:, 0])-1, max(top_gene_trans[:, 0])+1])
ax_gene.set_ylim([min(top_gene_trans[:, 1])-1, max(top_gene_trans[:, 1])+2])
ax_gene.set_title(title+'_gene')
ax_gene.set_xlabel('PC1')
ax_gene.set_ylabel('PC2')
if args.annotate_gene_subset:
plot_subset_path = os.path.join(os.path.dirname(args.filepath),args.annotate_gene_subset)
genes_plot = pd.read_csv(plot_subset_path, sep='\t', index_col=False)
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if label in genes_plot['GeneID'].tolist():
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
else:
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
if plot:
plt.show()
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
plt.savefig(os.path.join(path_filename,save_name+'_TruncatedSVD.pdf'), bbox_inches='tight')
#plot_url = py.plot_mpl(fig)
else:
#plot_url = py.plot_mpl(fig)
plt.savefig(os.path.join(path_filename,'Group0_TruncatedSVD.pdf'), bbox_inches='tight')
plt.close('all')
return top_pca_list
else:
return []
#create cell and gene TSNE scatter plots (one pdf)
def plot_TSNE(args,df_by_gene, path_filename, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, gene_map = False):
gene_list = df_by_gene.columns.tolist()
sns.set_palette("RdBu_r", 10, 1)
if gene_list_filter:
sig_by_gene = df_by_gene[gene_list_filter]
sig_by_cell = sig_by_gene.transpose()
else:
sig_by_gene = df_by_gene
sig_by_cell = sig_by_gene.transpose()
gene_pca = TruncatedSVD(n_components=3)
np_by_gene = np.asarray(sig_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
top_by_gene = df_by_gene[top_pca_list[0:num_genes]]
gene_top = TSNE(n_components=2, init='pca', random_state=0)
cell_pca = TSNE(n_components=2, init='pca', random_state=0)
top_by_cell = top_by_gene.transpose()
np_top_gene = np.asarray(top_by_cell)
np_top_cell = np.asarray(top_by_gene)
top_cell_trans = cell_pca.fit_transform(np_top_cell)
top_gene_trans = gene_top.fit_transform(np_top_gene)
if not np.isnan(top_cell_trans).any():
fig, (ax_cell, ax_gene) = plt.subplots(2, 1, figsize=(15, 30), sharex=False)
rect_cell = ax_cell.patch
rect_gene = ax_gene.patch
rect_cell.set_facecolor('white')
rect_gene.set_facecolor('white')
ax_cell.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
ax_gene.grid(b=True, which='major', color='grey', linestyle='--', linewidth=0.3)
if label_map:
annotate = args.annotate_cell_pca
X = [x for x in top_cell_trans[:, 0]]
Y = [y for y in top_cell_trans[:, 1]]
labels = [label_map[cell][2] for cell in top_by_cell.columns.tolist()]
markers = [label_map[cell][1] for cell in top_by_cell.columns.tolist()]
colors = [label_map[cell][0] for cell in top_by_cell.columns.tolist()]
label_done = []
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
if l in label_done:
lab = ''
else:
lab= l
label_done.append(l)
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_cell.scatter(X_pos, Y_pos, marker=m, c=color, label=lab, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_cell.add_artist(ell)
ax_cell.add_artist(edge)
else:
ax_cell.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], alpha=0.75)
annotate = args.annotate_cell_pca
ax_cell.set_xlim([min(top_cell_trans[:, 0])-1, max(top_cell_trans[:, 0]+1)])
ax_cell.set_ylim([min(top_cell_trans[:, 1])-1, max(top_cell_trans[:, 1]+2)])
ax_cell.set_title(title+'_cell')
if label_map:
handles, labs = ax_cell.get_legend_handles_labels()
# sort both labels and handles by labels
labs, handles = zip(*sorted(zip(labs, handles), key=lambda t: t[0]))
ax_cell.legend(handles, labs, loc='best', ncol=1, prop={'size':12}, markerscale=1.5, frameon=True)
ax_cell.set_xlabel('PC1')
ax_cell.set_ylabel('PC2')
if annotate:
for label, x, y in zip(top_by_cell.columns, top_cell_trans[:, 0], top_cell_trans[:, 1]):
ax_cell.annotate(label, (x+0.1, y+0.1))
if gene_map:
X = [x for x in top_gene_trans[:, 0]]
Y = [y for y in top_gene_trans[:, 1]]
labels = top_by_gene.columns.tolist()
markers = [gene_map[gene][1] for gene in top_by_gene.columns.tolist()]
colors = [gene_map[gene][0] for gene in top_by_gene.columns.tolist()]
xy_by_color_dict = {}
for c in set(colors):
xy_by_color_dict[c] = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
xy_by_color_dict[color].append([X_pos,Y_pos])
ax_gene.scatter(X_pos, Y_pos, marker=m, c=color, label = l, s=30)
if args.add_ellipse:
for c in set(colors):
ell, edge = ellip_enclose(np.asarray(xy_by_color_dict[c]), c)
ax_gene.add_artist(ell)
ax_gene.add_artist(edge)
else:
ax_gene.scatter(top_gene_trans[:, 0], top_gene_trans[:, 1], alpha=0.75)
ax_gene.set_xlim([min(top_gene_trans[:, 0])-1, max(top_gene_trans[:, 0])+1])
ax_gene.set_ylim([min(top_gene_trans[:, 1])-1, max(top_gene_trans[:, 1])+2])
ax_gene.set_title(title+'_gene')
ax_gene.set_xlabel('PC1')
ax_gene.set_ylabel('PC2')
if args.annotate_gene_subset:
plot_subset_path = os.path.join(os.path.dirname(args.filepath),args.annotate_gene_subset)
genes_plot = pd.read_csv(plot_subset_path, sep='\t', index_col=False)
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if label in genes_plot['GeneID'].tolist():
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
else:
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
if '_' in label:
label = label.split('_')[0]
ax_gene.annotate(label, (x+0.1, y+0.1))
if plot:
plt.show()
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
plt.savefig(os.path.join(path_filename,save_name+'_TSNE.pdf'), bbox_inches='tight')
else:
plt.savefig(os.path.join(path_filename,'Group0_TSNE.pdf'), bbox_inches='tight')
plt.close('all')
return top_pca_list
else:
return []
#create kmeans clustering and silhouette analysis plots and write cluster assignments (one pdf per cluster number)
def plot_kmeans(args, df_by_gene, path_filename, kmeans_range, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, gene_map = False, run_sig_test=False):
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
gene_list = df_by_gene.columns.tolist()
sns.set_palette("RdBu_r", 10, 1)
if gene_list_filter:
sig_by_gene = df_by_gene[gene_list_filter]
sig_by_cell = sig_by_gene.transpose()
else:
sig_by_gene = df_by_gene
sig_by_cell = sig_by_gene.transpose()
gene_pca = TruncatedSVD(n_components=3)
np_by_gene = np.asarray(sig_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
top_by_gene = df_by_gene[top_pca_list[0:num_genes]]
gene_top = TruncatedSVD(n_components=2)
cell_pca = TruncatedSVD(n_components=2)
top_by_cell = top_by_gene.transpose()
np_top_gene = np.asarray(top_by_cell)
np_top_cell = np.asarray(top_by_gene)
top_cell_trans = cell_pca.fit_transform(np_top_cell)
top_gene_trans = gene_top.fit_transform(np_top_gene)
range_n_clusters = range(kmeans_range[0],kmeans_range[1])
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(np_top_cell) + (n_clusters + 1) * 10])
#cluster cell PCA
cell_clusterer = KMeans(n_clusters=n_clusters)
top_cell_pred = cell_clusterer.fit_predict(top_cell_trans)
#cluster gene PCA
gene_clusterer = KMeans(n_clusters=n_clusters)
top_gene_pred = gene_clusterer.fit_predict(top_gene_trans)
pred_dict = {'SampleID':top_by_cell.columns, 'GroupID':['kmeans_'+str(p) for p in top_cell_pred]}
df_pred = pd.DataFrame(pred_dict)
cell_group_path = os.path.join(path_filename,'kmeans_cell_groups_'+str(n_clusters)+'.txt')
df_pred.to_csv(cell_group_path, sep = '\t')
        #compute silhouette averages and values
silhouette_avg_cell = silhouette_score(top_cell_trans, top_cell_pred)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg_cell)
silhouette_avg_gene = silhouette_score(top_gene_trans, top_gene_pred)
sample_silhouette_values_cell = silhouette_samples(top_cell_trans, top_cell_pred)
sample_silhouette_values_gene = silhouette_samples(top_gene_trans, top_gene_pred)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values_cell[top_cell_pred == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
        # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg_cell, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(top_cell_pred.astype(float) / n_clusters)
ax2.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = cell_clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.savefig(os.path.join(path_filename,'Group0_kmeans_'+str(n_clusters)+'_clusters.pdf'), bbox_inches='tight')
plt.close('all')
        #use colors to make label map compatible with heatmap
color_dict ={}
markers = ['o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd','o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd']
color_dict =dict(zip(top_by_cell.columns, zip(colors,[markers[pred] for pred in top_cell_pred],['kmeans_'+str(p) for p in top_cell_pred])))
group_color_dict = dict(zip(['kmeans_'+str(p) for p in top_cell_pred],zip(colors,[markers[pred] for pred in top_cell_pred])))
#run heatmap with kmeans clustering and colors
top_pca_by_gene, top_pca = return_top_pca_gene(df_by_gene, num_genes=args.gene_number)
top_pca_by_cell = top_pca_by_gene.transpose()
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(args, top_pca, top_pca_by_gene, path_filename, num_to_plot=args.gene_number, title= 'kmeans_label_with_'+str(n_clusters)+'_clusters',label_map=color_dict)
if run_sig_test:
multi_group_sig(args, df_by_gene.transpose(), cell_group_path, path_filename, group_color_dict, from_kmeans=str(n_clusters))
def clust_heatmap(args, gene_list, df_by_gene, path_filename, num_to_plot, title='', plot=False, label_map=False, gene_map=False, fontsize=18):
cell_list = df_by_gene.index.tolist()
cell_num =len(cell_list)
longest_side = max(num_to_plot,cell_num*2)
if longest_side == num_to_plot:
sns.set(context= 'poster', font_scale = .4*(num_to_plot/100))
width_heatmap = min(28+round(cell_num/50),42+round(cell_num/40))
len_heatmap = min(43+round(num_to_plot/10),58+round(num_to_plot/30))
title_set = 1.15
else:
sns.set(context= 'poster', font_scale = .6*(cell_num/120))
width_heatmap = min(42+round(cell_num/9),68+round(cell_num/40))
len_heatmap = min(47+round(num_to_plot/8),50+round(num_to_plot/30))
title_set = 1.12
font = {'size' : fontsize}
plt.rc('font', **font)
    if len(str(args.z_direction)) > 1:
        z_choice = str(args.z_direction)
        if z_choice != 'None':
            sys.exit('Please enter a valid option (0, 1, or None) for z_direction')
        #convert the string 'None' into the None object expected by seaborn's z_score argument
        z_choice = None
    else:
        z_choice = int(args.z_direction)
        if z_choice != 0 and z_choice != 1:
            sys.exit('Please enter a valid option (0, 1, or None) for z_direction')
cmap = sns.diverging_palette(255, 10, s=99, sep=1, as_cmap=True)
cluster_df = df_by_gene[gene_list[0:num_to_plot]].transpose()
cluster_df[abs(cluster_df)<3e-12] = 0.0
try:
cg = sns.clustermap(cluster_df, method=args.method, metric=args.metric, z_score=z_choice, figsize=(width_heatmap, len_heatmap), cmap =cmap)
col_order = cg.dendrogram_col.reordered_ind
row_order = cg.dendrogram_row.reordered_ind
if label_map and gene_map:
Xlabs = [cell_list[i] for i in col_order]
Xcolors = [label_map[cell][0] for cell in Xlabs]
col_colors = pd.DataFrame({'Cell Groups': Xcolors},index=Xlabs)
Xgroup_labels = [label_map[cell][2] for cell in Xlabs]
Ylabs = [gene_list[i] for i in row_order]
Ycolors = [gene_map[gene][0] for gene in Ylabs]
Ygroup_labels= [gene_map[gene][2] for gene in Ylabs]
row_colors = pd.DataFrame({'Gene Groups': Ycolors},index=Ylabs)
cg = sns.clustermap(cluster_df, method=args.method, metric=args.metric, z_score=z_choice,row_colors=row_colors, col_colors=col_colors, figsize=(width_heatmap, len_heatmap), cmap =cmap)
elif label_map:
Xlabs = [cell_list[i] for i in col_order]
Xcolors = [label_map[cell][0] for cell in Xlabs]
Xgroup_labels = [label_map[cell][2] for cell in Xlabs]
col_colors = pd.DataFrame({'Cell Groups': Xcolors},index=Xlabs)
cg = sns.clustermap(cluster_df, method=args.method, metric=args.metric, z_score=z_choice, col_colors=col_colors, figsize=(width_heatmap, len_heatmap), cmap =cmap)
elif gene_map:
Ylabs = [gene_list[i] for i in row_order]
Ycolors = [gene_map[gene][0] for gene in Ylabs]
Ygroup_labels= [gene_map[gene][2] for gene in Ylabs]
row_colors = pd.DataFrame({'Gene Groups': Ycolors},index=Ylabs)
cg = sns.clustermap(cluster_df, method=args.method, metric=args.metric, z_score=z_choice,row_colors=row_colors, figsize=(width_heatmap, len_heatmap), cmap =cmap)
cg.ax_heatmap.set_title(title, y=title_set)
cg.cax.set_title('Z-score')
if label_map:
leg_handles_cell =[]
group_seen_cell = []
for xtick, xcolor, xgroup_name in zip(cg.ax_heatmap.get_xticklabels(), Xcolors, Xgroup_labels):
xtick.set_color(xcolor)
xtick.set_rotation(270)
xtick.set_fontsize(fontsize)
if xgroup_name not in group_seen_cell:
leg_handles_cell.append(patches.Patch(color=xcolor, label=xgroup_name))
group_seen_cell.append(xgroup_name)
else:
for xtick in cg.ax_heatmap.get_xticklabels():
xtick.set_rotation(270)
xtick.set_fontsize(fontsize)
if gene_map:
leg_handles_gene =[]
group_seen_gene = []
for ytick, ycolor, ygroup_name in zip(cg.ax_heatmap.get_yticklabels(), list(reversed(Ycolors)), list(reversed(Ygroup_labels))):
ytick.set_color(ycolor)
ytick.set_rotation(0)
ytick.set_fontsize(fontsize)
if ygroup_name not in group_seen_gene:
leg_handles_gene.append(patches.Patch(color=ycolor, label=ygroup_name))
group_seen_gene.append(ygroup_name)
else:
for ytick in cg.ax_heatmap.get_yticklabels():
ytick.set_rotation(0)
ytick.set_fontsize(fontsize)
if gene_map and label_map:
gene_legend = cg.ax_heatmap.legend(handles=leg_handles_gene, loc=2, bbox_to_anchor=(1.04, 0.8), title='Gene groups', prop={'size':fontsize})
plt.setp(gene_legend.get_title(),fontsize=fontsize)
cg.ax_heatmap.add_artist(gene_legend)
cell_legend = cg.ax_heatmap.legend(handles=leg_handles_cell, loc=2, bbox_to_anchor=(1.04, 1), title='Cell groups', prop={'size':fontsize})
plt.setp(cell_legend.get_title(),fontsize=fontsize)
#cg.ax_heatmap.add_artist(cell_legend)
elif label_map:
cell_legend = cg.ax_heatmap.legend(handles=leg_handles_cell, loc=2, bbox_to_anchor=(1.04, 1), title='Cell groups', prop={'size':fontsize})
plt.setp(cell_legend.get_title(),fontsize=fontsize)
elif gene_map:
gene_legend = cg.ax_heatmap.legend(handles=leg_handles_gene, loc=2, bbox_to_anchor=(1.04, 0.8), title='Gene groups', prop={'size':fontsize})
plt.setp(gene_legend.get_title(),fontsize=fontsize)
if plot:
plt.show()
cell_linkage = cg.dendrogram_col.linkage
link_mat = pd.DataFrame(cell_linkage,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(cell_linkage.shape[0])])
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
#plot_url = py.plot_mpl(cg)
cg.savefig(os.path.join(path_filename, save_name+'_heatmap.pdf'), bbox_inches='tight')
else:
#plot_url = py.plot_mpl(cg)
cg.savefig(os.path.join(path_filename,'Group0_Heatmap_all_cells.pdf'), bbox_inches='tight')
plt.close('all')
return cell_linkage, df_by_gene[gene_list[0:num_to_plot]], col_order
except FloatingPointError:
print('Linkage distance has too many zeros. Filter to remove non-expressed genes in order to produce heatmap. Heatmap with '+ str(len(cell_list))+' will not be created.')
return False, False, False
def make_subclusters(args, cc, log2_expdf_cell, log2_expdf_cell_full, path_filename, base_name, gene_corr_list, label_map=False, gene_map=False, cluster_size=20, group_colors=False):
'''
Walks a histogram branch map 'cc' and does PCA (SVD), heatmap and correlation search for each non-overlapping
tree branch. Stops at defined cluster_size (default is 20).
'''
#initial cell group is parent
parent = cc[0][1]
#p_num is the number of cells in the parent group
p_num = cc[0][0]
#l_nums is number of members of each leaf of tree
l_nums = [x[0] for x in cc]
#cell list is the list of list of cells in each leaf of tree
c_lists = [c[1] for c in cc]
    #Group ID will increment with each group so that each subcluster has a unique ID
group_ID = 0
for num_members, cell_list in zip(l_nums, c_lists):
#run all cell groups that are subgroups of the parent and greater than or equal to the cutoff cluster_size
if num_members < p_num and num_members >= cluster_size:
group_ID+=1
#save name for all files generated within this cluster i.e. 'Group_2_with_105_cells_heatmap.pdf'
current_title = 'Group_'+str(group_ID)+'_with_'+str(num_members)+'_cells'
cell_subset = log2_expdf_cell[list(set(cell_list))]
gene_subset = cell_subset.transpose()
gene_subset = gene_subset.loc[:,(gene_subset!=0).any()]
full_cell_subset = log2_expdf_cell_full[list(set(cell_list))]
full_gene_subset = full_cell_subset.transpose()
full_gene_subset = full_gene_subset.loc[:,(full_gene_subset!=0).any()]
norm_df_cell1 = np.exp2(full_cell_subset)
norm_df_cell = norm_df_cell1 -1
            norm_df_cell.to_csv(os.path.join(path_filename, base_name+'_'+current_title+'_matrix.txt'), sep = '\t')
if gene_map:
top_pca_by_gene, top_pca = return_top_pca_gene(gene_subset, num_genes=args.gene_number)
plot_SVD(args,gene_subset, path_filename, num_genes=len(gene_subset.columns.tolist()), title=current_title, plot=False, label_map=label_map, gene_map=gene_map)
else:
top_pca_by_gene, top_pca = return_top_pca_gene(full_gene_subset, num_genes=args.gene_number)
plot_SVD(args,full_gene_subset, path_filename, num_genes=int(args.gene_number), title=current_title, plot=False, label_map=label_map)
if len(top_pca)<args.gene_number:
plot_num = len(top_pca)
else:
plot_num = args.gene_number
if top_pca != []:
top_pca_by_cell = top_pca_by_gene.transpose()
                #args.no_corr is False when the no_corr flag is provided; correlation plots are skipped in that case
if args.no_corr:
if gene_corr_list != []:
top_genes_search = top_pca[0:50]
corr_plot(top_genes_search, full_gene_subset, path_filename, num_to_plot=3, gene_corr_list= gene_corr_list, title = current_title, label_map=label_map)
else:
top_genes_search = top_pca[0:50]
corr_plot(top_genes_search, full_gene_subset, path_filename, num_to_plot=3, title = current_title, label_map=label_map)
if gene_map:
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(args, top_pca, top_pca_by_gene, path_filename, num_to_plot=plot_num, title=current_title, plot=False, label_map=label_map, gene_map = gene_map)
else:
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(args, top_pca, top_pca_by_gene, path_filename,num_to_plot=plot_num, title=current_title, plot=False, label_map=label_map)
plt.close('all')
else:
print('Search for top genes by PCA failed in '+current_title+'. No plots will be generated for this subcluster. ')
pass
def clust_stability(args, log2_expdf_gene, path_filename, iterations, label_map=False):
sns.set(context='poster', font_scale = 1)
sns.set_palette("RdBu_r")
stability_ratio = []
total_genes = len(log2_expdf_gene.columns.tolist())
end_num = 1000
iter_list = range(100,int(round(end_num)),int(round(end_num/iterations)))
for gene_number in iter_list:
title= str(gene_number)+' genes plot.'
        top_pca_by_gene, top_pca = return_top_pca_gene(log2_expdf_gene, num_genes=gene_number)
        top_pca_by_cell = top_pca_by_gene.transpose()
        cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(args, top_pca, top_pca_by_gene, path_filename, num_to_plot=gene_number, title=title, label_map=label_map)
if gene_number == 100:
s1 = col_order
s0 = col_order
else:
s2= col_order
sm_running = difflib.SequenceMatcher(None,s1,s2)
sm_first = difflib.SequenceMatcher(None,s0,s2)
stability_ratio.append((sm_running.ratio(), sm_first.ratio()))
s1=col_order
plt.close()
x= iter_list[1:]
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
y1= [m[0] for m in stability_ratio]
y2= [m[1] for m in stability_ratio]
sns.barplot(x, y1, palette="RdBu_r", ax=ax1)
ax1.set_ylabel('Running ratio (new/last)')
sns.barplot(x, y2, palette="RdBu_r", ax=ax2)
ax2.set_ylabel('Ratio to 100')
plt.savefig(os.path.join(path_filename,'clustering_stability.pdf'), bbox_inches='tight')
plt.show()
plt.close('all')
return stability_ratio
#run correlation matrix and save only those above threshold
def run_corr(df_by_gene, title, path_filename, method_name='pearson', sig_threshold= 0.5, run_new=True, min_period=3, save_corrs=False):
if run_new:
if len(df_by_gene.columns.tolist())>5000:
df_by_gene, top_pca_list = return_top_pca_gene(df_by_gene, num_genes=5000)
if method_name != 'kendall':
corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period)
else:
corr_by_gene = df_by_gene.corr(method=method_name)
df_by_cell = df_by_gene.transpose()
corr_by_cell = df_by_cell.corr()
cor = corr_by_gene
cor.loc[:,:] = np.tril(cor.values, k=-1)
cor = cor.stack()
corr_by_gene_pos = cor[cor >=sig_threshold]
corr_by_gene_neg = cor[cor <=(sig_threshold*-1)]
else:
corr_by_g_pos = open(os.path.join(path_filename,'gene_correlations_sig_pos_'+method_name+'.p'), 'rb')
corr_by_g_neg = open(os.path.join(path_filename,'gene_correlations_sig_neg_'+method_name+'.p'), 'rb')
corr_by_gene_pos = pickle.load(corr_by_g_pos)
corr_by_gene_neg = pickle.load(corr_by_g_neg)
if save_corrs:
with open(os.path.join(path_filename,'gene_correlations_sig_neg_'+method_name+'.p'), 'wb') as fp:
pickle.dump(corr_by_gene_neg, fp)
with open(os.path.join(path_filename,'gene_correlations_sig_pos_'+method_name+'.p'), 'wb') as fp0:
pickle.dump(corr_by_gene_pos, fp0)
with open(os.path.join(path_filename,'by_gene_corr.p'), 'wb') as fp1:
pickle.dump(corr_by_gene, fp1)
with open(os.path.join(path_filename,'by_cell_corr.p'), 'wb') as fp2:
pickle.dump(corr_by_cell, fp2)
cor_pos_df = pd.DataFrame(corr_by_gene_pos)
cor_neg_df = pd.DataFrame(corr_by_gene_neg)
sig_corr = cor_pos_df.append(cor_neg_df)
sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"])
if run_new:
sig_corrs.to_csv(os.path.join(path_filename, title+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t')
return sig_corrs
#finds most correlated gene groups that are not overlapping
def find_top_corrs(terms_to_search, sig_corrs, num_to_return, gene_corr_list = []):
all_corrs_list = []
best_corrs_list = []
for term_to_search in terms_to_search:
corr_tup = [(term_to_search, 1)]
for index, row in sig_corrs.iterrows():
if term_to_search in index:
if index[0]==term_to_search:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
all_corrs_list.append(corr_tup)
all_corrs_list.sort(key=len, reverse=True)
good_count = 0
corr_genes_seen = []
while good_count <= num_to_return:
for i, corrs in enumerate(all_corrs_list):
if corrs[0][0] not in corr_genes_seen:
best_corrs_list.append(corrs)
good_count+=1
for g, c in corrs:
if g not in corr_genes_seen and '-' not in str(c):
corr_genes_seen.append(g)
if gene_corr_list != []:
search_corrs = []
for term in gene_corr_list:
corr_tup = [(term, 1)]
for index, row in sig_corrs.iterrows():
if term in index:
if index[0]==term:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
search_corrs.append(corr_tup)
best_corrs_list = search_corrs+best_corrs_list
return best_corrs_list[0:num_to_return+len(gene_corr_list)+1]
else:
return best_corrs_list[0:num_to_return]
#corr_plot finds and plots all correlated genes, log turns on log scale, sort plots the genes in the rank order of the gene searched
def corr_plot(terms_to_search, df_by_gene_corr, path_filename, title, num_to_plot, gene_corr_list = [], label_map=False, log=False, sort=True, sig_threshold=0.5):
size_cells = len(df_by_gene_corr.index.tolist())
figlen=int(size_cells/12)
if figlen < 15:
figlen = 15
ncol = int(figlen/3.2)
if size_cells <100:
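        #for fewer than 100 cells, raise the correlation cutoff with an empirical log fit (~0.5 at 100 cells, higher for smaller groups)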
sig_threshold = -0.137*math.log(size_cells)+1.1322
sig_corrs = run_corr(df_by_gene_corr, title, path_filename, sig_threshold=sig_threshold)
corr_list = find_top_corrs(terms_to_search, sig_corrs, num_to_plot, gene_corr_list=gene_corr_list)
for corr_tup in corr_list:
term_to_search = corr_tup[0][0]
corr_tup.sort(key=itemgetter(1), reverse=True)
corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation'])
corr_df.to_csv(os.path.join(path_filename, title+'_Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False)
to_plot = [x[0] for x in corr_tup]
sns.set_palette(sns.cubehelix_palette(len(to_plot), start=1, rot=-.9, reverse=True))
sns.set_context("notebook", font_scale=.8, rc={"lines.linewidth": 1})
try:
sorted_df = df_by_gene_corr.sort_values(by=[term_to_search])
log2_df = np.log2(df_by_gene_corr[to_plot])
sorted_log2_df=np.log2(sorted_df[to_plot])
ylabel='Counts (log2)'
if sort and log:
ax = sorted_log2_df.plot(figsize = (figlen,10))
xlabels = sorted_log2_df[to_plot].index.values
elif sort:
ax = sorted_df[to_plot].plot(figsize = (figlen,10))
xlabels = sorted_df[to_plot].index.values
elif log:
ax = log2_df.plot(figsize = (figlen,10))
ylabel= 'log2 FPKM'
xlabels = log2_df.index.values
else:
ax = df_by_gene_corr[to_plot].plot(figsize = (figlen,10))
xlabels = df_by_gene_corr[to_plot].index.values
ax.set_xlabel('Cell #')
ax.set_ylabel(ylabel)
ax.set_title('Correlates with '+term_to_search, loc='right')
ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels)))
if label_map:
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
Xcolors = [label_map[cell][0] for cell in xlabels]
group_labels = [label_map[cell][2] for cell in xlabels]
group_seen = []
leg_handles = []
for xtick, xcolor, group_name in zip(ax.get_xticklabels(which='minor'), Xcolors, group_labels):
xtick.set_color(xcolor)
xtick.set_rotation(90)
if group_name not in group_seen:
leg_handles.append(patches.Patch(color=xcolor, label=group_name))
group_seen.append(group_name)
else:
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
ax.set_ylim([0, df_by_gene_corr[to_plot].values.max()])
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.tick_params(axis='x', which ='minor', labelsize=9)
#scale bbox anchoring to account for number of correlated genes and plot size
if len(corr_tup)>1:
bbox_height = float(1E-13)*pow(len(corr_tup),6) - float(7E-11)*pow(len(corr_tup),5) + float(1E-8)*pow(len(corr_tup),4) - float(8E-7)*pow(len(corr_tup),3) - float(3E-5)*pow(len(corr_tup),2) + 0.0086*len(corr_tup) + 1.0042
else:
bbox_height = 1.05
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
if label_map:
first_legend = ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height), ncol=ncol, prop={'size':10})
ax = plt.gca().add_artist(first_legend)
plt.legend(handles=leg_handles, loc='upper right', bbox_to_anchor=(0.9, bbox_height+.1))
else:
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height), ncol=ncol, prop={'size':10})
fig = plt.gcf()
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03)
plt.savefig(os.path.join(path_filename, title+'_corr_with_'+term_to_search+'.pdf'), bbox_inches='tight')
plt.close('all')
except KeyError:
print(term_to_search+' not in this matrix.')
pass
'''Compares each defined cell group to each other group and returns all genes with p-value and adjusted p-value.
Also creates a best_gene_list with the top genes between each group, by adjusted p-value.
Also creates a barplot of top significant genes between groups (can be unique or not).
'''
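# Illustrative aside (not called anywhere in this module): multi_group_sig below adjusts
# p-values with the Benjamini-Hochberg ('BH') method through rpy2's stats.p_adjust. The
# plain-numpy sketch here only documents what that correction does to a vector of raw
# p-values; the helper name is made up and it is not a drop-in for the R implementation.
def _bh_adjust_sketch(pvalues):
    import numpy as np
    p = np.asarray(pvalues, dtype=float)
    n = len(p)
    order = np.argsort(p)
    #step-up scaling: p_(i) * n / i for the i-th smallest p-value
    scaled = p[order] * n / (np.arange(n) + 1)
    #enforce monotone adjusted p-values (running minimum from the largest down) and cap at 1
    adjusted = np.minimum.accumulate(scaled[::-1])[::-1]
    adjusted = np.minimum(adjusted, 1.0)
    #return the adjusted values in the original input order
    out = np.empty_like(adjusted)
    out[order] = adjusted
    return out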
def multi_group_sig(args, full_by_cell_df, cell_group_filename, path_filename, color_dict_cell, sig_to_plot = 20, from_kmeans=''):
    #create separate folder for all of the significance files
if from_kmeans == '':
multi_sig_filename = os.path.join(path_filename,'user_defined_group_pairwise_significance_files')
else:
multi_sig_filename = os.path.join(path_filename,from_kmeans+'_group_pairwise_significance_files')
try:
os.mkdir(multi_sig_filename)
except OSError:
print(multi_sig_filename+' already exists. Files will be overwritten.')
plot_pvalue = False
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
stats = importr('stats')
cell_groups_df = pd.read_csv(open(cell_group_filename,'rU'), sep=None, engine='python')
group_name_list = list(set(cell_groups_df['GroupID']))
group_pairs = list(set(itertools.permutations(group_name_list,2)))
gene_list = full_by_cell_df.index.tolist()
cell_group_ident_0 = zip(cell_groups_df['SampleID'],cell_groups_df['GroupID'])
cell_group_ident= [c for c in cell_group_ident_0]
barplot_dict = {}
by_gene_df = full_by_cell_df.transpose()
best_gene_list = []
best_gene_groups = []
best_vs_list =[]
best_pvalue_list = []
for name in group_name_list:
barplot_dict[name] = {'genes':[], 'pvalues':[], 'fold_change':[], 'Vs':[]}
for gp in group_pairs:
index_list = []
sig_gene_list = []
gp_vs_all_seen = []
sig_vs_all_gene_list = []
cell_list1 = [c[0] for c in cell_group_ident if c[1] == gp[0]]
cell_list2 = [c[0] for c in cell_group_ident if c[1] == gp[1]]
cell1_present = [c for c in set(cell_list1) if c in set(full_by_cell_df.columns.tolist())]
cell2_present = [c for c in set(cell_list2) if c in set(full_by_cell_df.columns.tolist())]
all_other_cells = [c for c in set(full_by_cell_df.columns.tolist()) if c not in set(cell_list1)]
if cell1_present != [] and cell2_present != []:
df_by_cell_1 = full_by_cell_df.ix[:,cell1_present]
df_by_cell_2 = full_by_cell_df.ix[:,cell2_present]
df_by_cell_other = full_by_cell_df.ix[:,all_other_cells]
df_by_gene_1 = df_by_cell_1.transpose()
df_by_gene_2 = df_by_cell_2.transpose()
df_by_gene_other = df_by_cell_other.transpose()
for g in gene_list:
g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g])
if gp[0] not in gp_vs_all_seen:
g_pvalue_all = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_other[g])
if g_pvalue[0] > 0 and g_pvalue[1] <= 1:
if g not in [s[0] for s in sig_gene_list]:
sig_gene_list.append([g, g_pvalue[1]])
if gp[0] not in gp_vs_all_seen:
sig_vs_all_gene_list.append([g, g_pvalue_all[1]])
sig_gene_list.sort(key=lambda tup: tup[1])
if gp[0] not in gp_vs_all_seen:
sig_vs_all_gene_list.sort(key=lambda tup: tup[1])
pvalues_all = [p[1] for p in sig_vs_all_gene_list]
p_adjust_all = stats.p_adjust(FloatVector(pvalues_all), method = 'BH')
gene_index_all = [ge[0] for ge in sig_vs_all_gene_list]
mean_log2_exp_list_all = []
sig_1_2_list_all = []
mean1_list_all = []
mean2_list_all = []
for sig_gene in gene_index_all:
sig_gene_df_all = by_gene_df.ix[:,sig_gene]
mean_log2_exp_list_all.append(sig_gene_df_all.mean())
sig_cell_df_all = sig_gene_df_all.transpose()
mean_cell1_all = sig_cell_df_all[cell1_present].mean()
mean1_list_all.append(mean_cell1_all)
mean_cell_other = sig_cell_df_all[all_other_cells].mean()
mean2_list_all.append(mean_cell_other)
ratio_1_other = (mean_cell1_all+1)/(mean_cell_other+1)
sig_1_2_list_all.append(ratio_1_other)
sig_df_vs_other = pd.DataFrame({'pvalues':pvalues_all,'adjusted_p_values':p_adjust_all,'mean_all':mean_log2_exp_list_all, 'mean_'+gp[0]:mean1_list_all, 'mean_all_other':mean2_list_all, 'ratio '+gp[0]+' to everything':sig_1_2_list_all}, index=gene_index_all)
sig_df_vs_other.to_csv(os.path.join(multi_sig_filename,'sig_'+gp[0]+'_VS_all_other_pvalues.txt'), sep = '\t')
gp_vs_all_seen.append(gp[0])
pvalues = [p[1] for p in sig_gene_list]
p_adjust = stats.p_adjust(FloatVector(pvalues), method = 'BH')
gene_index = [ge[0] for ge in sig_gene_list]
mean_log2_exp_list = []
sig_1_2_list = []
mean1_list = []
mean2_list = []
for sig_gene in gene_index:
sig_gene_df = by_gene_df.ix[:,sig_gene]
mean_log2_exp_list.append(sig_gene_df.mean())
sig_cell_df = sig_gene_df.transpose()
mean_cell1 = sig_cell_df[cell1_present].mean()
mean1_list.append(mean_cell1)
mean_cell2 = sig_cell_df[cell2_present].mean()
mean2_list.append(mean_cell2)
ratio_1_2 = (mean_cell1+1)/(mean_cell2+1)
sig_1_2_list.append(ratio_1_2)
sig_df = pd.DataFrame({'pvalues':pvalues,'adjusted_p_values':p_adjust,'mean_all':mean_log2_exp_list,'mean_'+gp[0]:mean1_list, 'mean_'+gp[1]:mean2_list, 'ratio '+gp[0]+' to '+gp[1]:sig_1_2_list}, index=gene_index)
cell_names_df = pd.DataFrame({gp[0]+'_cells':pd.Series(cell1_present, index=range(len(cell1_present))), gp[1]+'_cells2':pd.Series(cell2_present, index=range(len(cell2_present)))})
sig_df.to_csv(os.path.join(multi_sig_filename,'sig_'+gp[0]+'_VS_'+gp[1]+'_pvalues.txt'), sep = '\t')
cell_names_df.to_csv(os.path.join(multi_sig_filename,'sig_'+gp[0]+'_VS_'+gp[1]+'_cells.txt'), sep = '\t')
top_fc_df1 = sig_df.loc[(sig_df['ratio '+gp[0]+' to '+gp[1]]>1.3)]
new_fc_list = [-1/x if x <0.3 else x for x in top_fc_df1['ratio '+gp[0]+' to '+gp[1]]]
top_fc_df1.loc[:,'ratio '+gp[0]+' to '+gp[1]] = new_fc_list
top_fc_df = top_fc_df1.sort_values(by='adjusted_p_values',axis=0, ascending=True)
genes = top_fc_df.index.tolist()
pvalues = top_fc_df['adjusted_p_values'].tolist()
fc = top_fc_df['ratio '+gp[0]+' to '+gp[1]].tolist()
z = zip(genes,pvalues,fc)
z_all = [s for s in z]
if args.sig_unique:
top_t = [g for g in z_all if g[0] not in barplot_dict[gp[0]]['genes']]
else:
top_t = [g for g in z_all if g[0]]
if args.exclude_genes:
hu_cc_gene_df = pd.DataFrame.from_csv(os.path.join(os.path.dirname(args.filepath),args.exclude_genes), sep='\t', header=0, index_col=False)
exclude_list = hu_cc_gene_df['GeneID'].tolist()
top_t2 = [g for g in top_t if g[0] not in barplot_dict[gp[0]]['genes'] and g[0] not in exclude_list]
else:
top_t2 = top_t
top = [list(t) for t in zip(*top_t2)]
sig_to_plot = min(len(top[0]),sig_to_plot)
if sig_to_plot != 0:
barplot_dict[gp[0]]['genes']= barplot_dict[gp[0]]['genes']+[str(gene.strip(' ')) for gene in top[0][0:sig_to_plot]]
barplot_dict[gp[0]]['pvalues']= barplot_dict[gp[0]]['pvalues']+top[1][0:sig_to_plot]
barplot_dict[gp[0]]['fold_change']= barplot_dict[gp[0]]['fold_change']+top[2][0:sig_to_plot]
barplot_dict[gp[0]]['Vs']= barplot_dict[gp[0]]['Vs']+ ['significance vs '+gp[1] for x in range(0,len(top[0][0:sig_to_plot]))]
best_gene_list = best_gene_list+top[0][0:sig_to_plot]
best_gene_groups = best_gene_groups+[gp[0] for x in range(0,len(top[0][0:sig_to_plot]))]
best_vs_list = best_vs_list + [gp[1] for x in range(0,len(top[0][0:sig_to_plot]))]
best_pvalue_list = best_pvalue_list + top[1][0:sig_to_plot]
else:
if cell1_present == []:
print(gp[1], 'not present in cell matrix')
else:
print(gp[0], 'not present in cell matrix')
fig, axs = plt.subplots(1, len(group_name_list), figsize=(23+len(group_name_list),13), sharex=False, sharey=False)
axs = axs.ravel()
color_map = {}
#plot top significant genes for each group compared to all other groups
for i, name in enumerate(group_name_list):
to_plot= barplot_dict[name]
for v in set(to_plot['Vs']):
color_map[v] = color_dict_cell[v.split("significance vs ")[-1]][0]
if color_map != {}:
g = sns.barplot(x='pvalues', y='genes', hue='Vs', data=to_plot, ax = axs[i], palette=color_map)
axs[i].set_xscale("log", nonposx='clip')
bar_list = []
if plot_pvalue:
for p in axs[i].patches:
height = p.get_height()
width = p.get_width()
bar_list.append(width)
max_bar = max(bar_list)
for p in axs[i].patches:
height = p.get_height()
width = p.get_width()
axs[i].text(max_bar*50,p.get_y()+(height), "{:.2e}".format(width))
rect = axs[i].patch
rect.set_facecolor('white')
#sns.despine(left=True, bottom=True, top=True)
axs[i].invert_xaxis()
axs[i].xaxis.set_ticks_position('none')
axs[i].yaxis.tick_right()
axs[i].set_title(name)
axs[i].legend(loc='upper left', bbox_to_anchor=(0.01, 1.11+(0.01*len(group_name_list))), ncol=1, prop={'size':15})
axs[i].set_xlabel('adjusted p-value')
for xmaj in axs[i].xaxis.get_majorticklocs():
axs[i].axvline(x=xmaj,ls='--', lw = 0.5, color='grey', alpha=0.3)
axs[i].xaxis.grid(True, which="major", linestyle='-')
plt.subplots_adjust(left=.08, wspace=.3)
if from_kmeans == '':
plt.savefig(os.path.join(path_filename,'differential_genes_foldchanges.pdf'), bbox_inches='tight')
best_gene_df = pd.DataFrame({'GeneID':best_gene_list, 'GroupID':best_gene_groups, 'Vs':best_vs_list, 'adjusted_pvalue':best_pvalue_list})
best_gene_df.to_csv(os.path.join(path_filename,'Best_Gene_list.txt'), sep = '\t')
else:
plt.savefig(os.path.join(path_filename,'kmeans_'+from_kmeans+'_differential_genes_foldchanges.pdf'), bbox_inches='tight')
best_gene_df = pd.DataFrame({'GeneID':best_gene_list, 'GroupID':best_gene_groups, 'Vs':best_vs_list, 'adjusted_pvalue':best_pvalue_list})
best_gene_df.to_csv(os.path.join(path_filename,'kmeans_'+from_kmeans+'_Best_Gene_list.txt'), sep = '\t')
'''takes cell groups and creates dictionary 'label_map' that has attached color and marker
if the same cell is assigned to multiple groups it assigns it to the first groupID
'''
def cell_color_map(args, cell_group_filename, cell_list, color_dict):
cell_groups_df = pd.read_csv(open(os.path.join(os.path.dirname(args.filepath), cell_group_filename),'rU'), sep=None, engine='python')
cell_list_1 = list(set(cell_groups_df['SampleID'].tolist()))
group_set = list(set(cell_groups_df['GroupID'].tolist()))
if len(cell_groups_df['SampleID']) == len(cell_groups_df['GroupID']):
group_seen = []
label_map = {}
cells_seen = []
for cell, group in list(set(zip(cell_groups_df['SampleID'].tolist(), cell_groups_df['GroupID'].tolist()))):
if cell not in cells_seen:
group_count = group_set.index(group)
label_map[cell] = (color_dict[group][0],color_dict[group][1],group)
cells_seen.append(cell)
non_group_cells = [c for c in cell_list if c not in cells_seen]
if non_group_cells != []:
from matplotlib import colors
all_color_list = list(colors.cnames.keys())
markers = ['o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd','o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd']
color_list = ['b', 'm', 'r', 'c', 'g', 'orange', 'darkslateblue']+all_color_list
for cell in non_group_cells:
label_map[cell] = (color_list[group_count+1],markers[group_count+1],'No_Group')
else:
label_map = False
return cell_list_1, label_map
#takes gene groups and creates dictionary 'gene_label_map' that has attached color and marker
def gene_list_map(args, gene_list_file, gene_list, color_dict, exclude_list = []):
gene_df1 = pd.read_csv(open(os.path.join(os.path.dirname(args.filepath), gene_list_file),'rU'), sep=None, engine='python')
if exclude_list != []:
gene_df1 = gene_df1.ix[~gene_df1['GeneID'].isin(exclude_list)]
gene_df = gene_df1.copy()
gene_list_1 = [g for g in list(set(gene_df['GeneID'].tolist())) if g in gene_list]
if len(gene_df['GeneID']) == len(gene_df['GroupID']):
gene_label_map = {}
group_pos = 0
group_seen = ['xyz' for i in range(len(set(gene_df['GroupID'].tolist())))]
genes_seen = []
for gene, group in zip(gene_df['GeneID'].tolist(), gene_df['GroupID'].tolist()):
            #if no GroupIDs are provided replace with empty strings
try:
if math.isnan(float(group)):
group = ' '
except ValueError:
pass
if gene not in genes_seen:
if str(group) in group_seen:
pos = group_seen.index(str(group))
else:
group_seen[group_pos] = str(group)
pos = group_pos
group_pos += 1
gene_label_map[gene] = (color_dict[str(group)][0],color_dict[str(group)][1],str(group))
genes_seen.append(gene)
non_group_genes = [g for g in gene_list_1 if g not in genes_seen]
if non_group_genes != []:
            from matplotlib import colors
            all_color_list = list(colors.cnames.keys())
markers = ['o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd','o', 'v','D','*','x','h', 's','p','8','^','>','<', 'd']
color_list = ['b', 'm', 'r', 'c', 'g', 'orange', 'darkslateblue']+all_color_list
            for gene in non_group_genes:
                gene_label_map[gene] = (color_list[group_pos+1],markers[group_pos+1],'No_ID')
else:
gene_label_map = False
return gene_list_1, gene_label_map
#this script calls qgraph R package using rpy2, for gene or cell qgraph gene or cell groups must be provided (either or both)
def run_qgraph(data, cell_group_filename, gene_filename, label_map, gene_map, path_filename, gene_or_cell, minimum = 0.25, cut = 0.4, vsize = 1.5, legend = True, borders = False):
from rpy2.robjects import pandas2ri
pandas2ri.activate()
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
robjects.r.setwd(os.path.dirname(path_filename))
qgraph = importr('qgraph')
psych = importr('psych')
if gene_or_cell=='cell':
r_dataframe = pandas2ri.py2ri(data.transpose())
cell_groups_df = pd.read_csv(open(cell_group_filename,'rU'), sep=None, engine='python')
cell_list_1 = list(set(cell_groups_df['SampleID'].tolist()))
group_set = list(set(cell_groups_df['GroupID'].tolist()))
d = defaultdict(list)
cell_list_all = data.columns.tolist()
for i, cell in enumerate(cell_list_all):
group = label_map[cell][2]
d[group].append(i+1)
label_list = robjects.vectors.StrVector(cell_list_all)
elif gene_or_cell=='gene':
r_dataframe = pandas2ri.py2ri(data.transpose())
gene_groups_df = pd.read_csv(open(gene_filename,'rU'), sep=None, engine='python')
gene_list_1 = list(set(gene_groups_df['GeneID'].tolist()))
group_set = list(set(gene_groups_df['GroupID'].tolist()))
d = defaultdict(list)
d_color = []
#color_dict2 = {'r':'red', 'm':'magenta', 'b':'blue', 'g':'green', 'c':'cyan'}
gene_list_all = data.transpose().columns.tolist()
for i, gene in enumerate(gene_list_all):
group = gene_map[gene][2]
color = gene_map[gene][0]
d[group].append(i+1)
#d_color.append(color_dict2[color])
label_list = robjects.vectors.StrVector(gene_list_all)
#d_color_r = robjects.vectors.StrVector(d_color)
group_num = len(d)
from rpy2.robjects.vectors import FloatVector
for colname in d:
d[colname] = FloatVector(d[colname])
# data frame
from rpy2.robjects.vectors import ListVector
group_data = ListVector(d)
pca = psych.principal(robjects.r.cor(r_dataframe),group_num, rotate = "promax")
robjects.r.setwd(path_filename)
qpca = qgraph.qgraph(pca, groups = group_data, layout = "circle", rotation = "promax",
minimum = 0.2, cut = 0.4, vsize = FloatVector([1.5, 15]), labels= label_list, borders = False,
vTrans = 200,filename='graph_pca_'+gene_or_cell, filetype = "pdf", height = 15, width = 15)
if gene_or_cell == 'gene':
Q = qgraph.qgraph(robjects.r.cor(r_dataframe), minimum = 0.25, cut = 0.4, vsize = 1.5, groups = group_data,
legend = True, borders = False, labels = label_list, filename='graph_'+gene_or_cell, filetype = "pdf", height = 15, width = 15)
elif gene_or_cell =='cell':
Q = qgraph.qgraph(robjects.r.cor(r_dataframe), minimum = 0.25, cut = 0.4, vsize = 1.5, groups = group_data,
legend = True, borders = False, labels = label_list, filename='graph_'+gene_or_cell, filetype = "pdf", height = 15, width = 15)
Q = qgraph.qgraph(Q, layout = "spring", overlay=True)
robjects.r.setwd(os.path.dirname(path_filename))
def main(args):
try:
new_file = os.path.join(os.path.dirname(args.filepath),args.base_name+'_scicast')
except AttributeError:
sys.exit('Please provide a valid path to a file.')
if args.verbose:
print('Making new folder for results of SCICAST clustering: '+new_file)
try:
os.mkdir(new_file)
except OSError:
print(new_file+' already exists. Files will be overwritten.')
if args.gene_list_filename:
if os.path.isfile(args.gene_list_filename):
gene_list_file = args.gene_list_filename
elif os.path.isfile(os.path.join(os.path.dirname(args.filepath),args.gene_list_filename)):
gene_list_file = os.path.join(os.path.dirname(args.filepath),args.gene_list_filename)
else:
sys.exit('Error: Cannot find gene list file. Please place the gene list file in the same directory or provide a full path.')
if args.cell_list_filename:
if os.path.isfile(args.cell_list_filename):
cell_file = args.cell_list_filename
elif os.path.isfile(os.path.join(os.path.dirname(args.filepath),args.cell_list_filename)):
cell_file = os.path.join(os.path.dirname(args.filepath),args.cell_list_filename)
else:
sys.exit('Error: Cannot find cell list file. Please place the gene list file in the same directory or provide a full path.')
try:
by_cell = pd.DataFrame.from_csv(args.filepath, sep="\t")
except OSError:
sys.exit('Please provide a valid path to a file.')
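    #remove duplicated gene IDs so the matrix has a unique index (drop() removes every row sharing a duplicated label)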
dup_gene_list = [item for item, count in collections.Counter(by_cell.index).items() if count > 1]
if len(dup_gene_list) >0:
by_cell.drop(dup_gene_list, inplace=True)
by_gene = by_cell.transpose()
#create list of genes
gene_list_inital = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
    df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list_inital, index=cell_list)
#%% [markdown]
# # Fisher's method vs. min (after multiple comparison's correction)
#%%
from pkg.utils import set_warnings
set_warnings()
import csv
import datetime
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import subuniformity_plot
from matplotlib.transforms import Bbox
from myst_nb import glue as default_glue
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.stats import binom_2samp, stochastic_block_test
from scipy.stats import binom, combine_pvalues, ks_1samp, uniform
from tqdm import tqdm
DISPLAY_FIGS = False
FILENAME = "compare_sbm_methods_sim"
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, prefix="fig")
if not DISPLAY_FIGS:
plt.close()
def glue(name, var, prefix=None):
savename = f"{FILENAME}-{name}"
if prefix is not None:
savename = prefix + ":" + savename
default_glue(savename, var, display=False)
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
fisher_color = sns.color_palette("Set2")[2]
min_color = sns.color_palette("Set2")[3]
eric_color = sns.color_palette("Set2")[4]
method_palette = {"fisher": fisher_color, "min": min_color, "eric": eric_color}
GROUP_KEY = "simple_group"
left_adj, left_nodes = load_unmatched(side="left")
right_adj, right_nodes = load_unmatched(side="right")
left_labels = left_nodes[GROUP_KEY].values
right_labels = right_nodes[GROUP_KEY].values
#%%
stat, pvalue, misc = stochastic_block_test(
left_adj,
right_adj,
labels1=left_labels,
labels2=right_labels,
method="fisher",
combine_method="fisher",
)
#%% [markdown]
# ## Model for simulations (alternative)
# We have fit a stochastic block model to the left and right hemispheres. Say the
# probabilities of group-to-group connections *on the left* are stored in the matrix
# $B$, so that $B_{kl}$ is the probability of an edge from group $k$ to $l$.
#
# Let $\tilde{B}$ be a *perturbed* matrix of probabilities. We are interested in testing
# $H_0: B = \tilde{B}$ vs. $H_a: B \neq \tilde{B}$. To do so, we compare each
# $H_0: B_{kl} = \tilde{B}_{kl}$ using Fisher's exact test. This results in p-values for
# each $(k,l)$ comparison, $\{p_{1,1}, p_{1,2}...p_{K,K}\}$.
#
# Now, we still are after an overall test for the equality $B = \tilde{B}$. Thus, we
# need a way to combine p-values $\{p_{1,1}, p_{1,2}...p_{K,K}\}$ to get an *overall*
# p-value for our test comparing the stochastic block model probabilities. One way is
# Fisher's method; another is to take the
# minimum p-value out of a collection of p-values which have been corrected for multiple
# comparisons (say, via Bonferroni or Holm-Bonferroni).
#
# To compare how these two alternative methods of combining p-values work, we did the
# following simulation:
#
# - Let $t$ be the number of probabilities to perturb.
# - Let $\delta$ represent the strength of the perturbation (see model below).
# - For each trial:
# - Randomly select $t$ probabilities without replacement from the elements of $B$
# - For each of these elements, $\tilde{B}_{kl} = TN(B_{kl}, \delta B_{kl})$ where
# $TN$ is a truncated normal distribution, such that probabilities don't end up
# outside of [0, 1].
# - For each element *not* perturbed, $\tilde{B}_{kl} = B_{kl}$
# - Sample the number of edges from each block under each model. In other words, let
# $m_{kl}$ be the number of edges in the $(k,l)$-th block, and let $n_k, n_l$ be
# the number of nodes in the $k$-th and $l$-th groups, respectively. Then, we have
#
# $$m_{kl} \sim Binomial(n_k n_l, B_{kl})$$
#
# and likewise but with $\tilde{B}_{kl}$ for $\tilde{m}_{kl}$.
# - Run Fisher's exact test to generate a $p_{kl}$ for each $(k,l)$.
# - Run Fisher's method for combining p-values, or take the minimum p-value after
# Bonferroni correction.
# - These trials were repeated for $\delta \in \{0.1, 0.2, 0.3, 0.4, 0.5\}$ and
# $t \in \{25, 50, 75, 100, 125\}$. For each $(\delta, t)$ we ran 100 replicates of the
#   model/test above. (A minimal sketch of a single trial is shown in the code cell below.)
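#%% [markdown]
# Below is a minimal, self-contained sketch of a single trial of this simulation. It is
# illustrative only: `binom_2samp` from `pkg.stats` (used elsewhere in this notebook) is
# replaced by a plain 2x2 Fisher's exact test, and the helper names
# (`perturb_probability`, `one_trial`) are made up for the sketch.
#%%
from scipy.stats import fisher_exact, truncnorm


def perturb_probability(p, delta, rng):
    """Draw B~_kl from TN(B_kl, delta * B_kl), truncated so it stays in [0, 1]."""
    scale = delta * p
    a, b = (0 - p) / scale, (1 - p) / scale
    return truncnorm.rvs(a, b, loc=p, scale=scale, random_state=rng)


def one_trial(base_probs, ns, t, delta, rng):
    """One replicate: perturb t probabilities, resample edge counts, combine p-values."""
    perturbed = base_probs.copy()
    for i in rng.choice(len(base_probs), size=t, replace=False):
        perturbed[i] = perturb_probability(base_probs[i], delta, rng)
    # sample the number of edges in each block under the base and perturbed models
    counts1 = binom.rvs(ns, base_probs, random_state=rng)
    counts2 = binom.rvs(ns, perturbed, random_state=rng)
    # Fisher's exact test on each block's 2x2 table of (edges, non-edges)
    pvalues = np.array(
        [fisher_exact([[x, n - x], [y, n - y]])[1] for x, y, n in zip(counts1, counts2, ns)]
    )
    _, fisher_pvalue = combine_pvalues(pvalues, method="fisher")
    min_pvalue = min(pvalues.min() * len(pvalues), 1)  # min after Bonferroni
    return fisher_pvalue, min_pvalue


# e.g. one_trial(base_probs, ns, t=25, delta=0.3, rng=rng) once base_probs/ns are defined below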
#%% [markdown]
# ## P-values under the null
#%%
B_base = misc["probabilities1"].values
inds = np.nonzero(B_base)
base_probs = B_base[inds]
n_possible_matrix = misc["possible1"].values
ns = n_possible_matrix[inds]
n_null_sims = 100
RERUN_NULL = True
save_path = Path(
"/Users/bpedigo/JHU_code/bilateral/bilateral-connectome/results/"
"outputs/compare_sbm_methods_sim/null_results.csv"
)
if RERUN_NULL:
null_rows = []
for sim in tqdm(range(n_null_sims)):
base_samples = binom.rvs(ns, base_probs)
perturb_samples = binom.rvs(ns, base_probs)
# test on the new data
def tester(cell):
stat, pvalue = binom_2samp(
base_samples[cell],
ns[cell],
perturb_samples[cell],
ns[cell],
null_odds=1,
method="fisher",
)
return pvalue
pvalue_collection = np.vectorize(tester)(np.arange(len(base_samples)))
n_overall = len(pvalue_collection)
pvalue_collection = pvalue_collection[~np.isnan(pvalue_collection)]
n_tests = len(pvalue_collection)
n_skipped = n_overall - n_tests
row = {
"sim": sim,
"n_tests": n_tests,
"n_skipped": n_skipped,
}
for method in ["fisher", "min", "eric"]:
row = row.copy()
if method == "min":
overall_pvalue = min(pvalue_collection.min() * n_tests, 1)
row["pvalue"] = overall_pvalue
elif method == "fisher":
stat, overall_pvalue = combine_pvalues(
pvalue_collection, method="fisher"
)
row["pvalue"] = overall_pvalue
elif method == "eric":
stat, overall_pvalue = ks_1samp(
pvalue_collection, uniform(0, 1).cdf, alternative="greater"
)
row["pvalue"] = overall_pvalue
row["method"] = method
null_rows.append(row)
null_results = pd.DataFrame(null_rows)
null_results.to_csv(save_path)
else:
    null_results = pd.read_csv(save_path, index_col=0)
"""
@author: <NAME>
file: main_queue.py
"""
from __future__ import print_function
from scoop import futures
import multiprocessing
import numpy as np
import pandas as pd
import timeit
import ZIPapliences as A_ZIP
class load_generation:
""" Class prepares the system for generating load
Attributes
----------
START_TIME_Q (pandas datetime): start time to generate load data
END_TIME_Q (pandas datetime): end time to generate load data
Queue_type (int): 0=inf; 1=C; 2=Ct
    P_U_B (int): percentage upper bound --> e.g. 2 = 200% from the reference
physical_machine (int): 1 = single node 2 = multiple nodes
NUM_WORKERS (int): number of workers used when generating load in a single node
NUM_HOMES (int): number of homes being generated
OUT_PUT_FILE_NAME_pre (str): file path to write output
    OUT_PUT_FILE_NAME (str): prefix of file name to be written
OUT_PUT_FILE_NAME_end (str): end of file name
OUT_PUT_FILE_NAME_summary_pre (str): file path to write output
    OUT_PUT_FILE_NAME_summary (str): prefix of summary file name to be written
TIME_DELT (pandas datetime): 1 minute
TIME_DELT_FH (pandas datetime): 1 hour
TIME_DELT_FD (pandas datetime): 1 day
    base_max (float): rescaling load reference upper bound
base_min (float): rescaling load reference lower bound
ref_load (pandas series): reference load
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
APP_parameter_list (list): input parameters
[(float) p.u. percentage of schedulable appliances 0.5=50%,
(int) appliance set size,
(int) average power rating in Watts,
        (int) standard deviation of the power rating in Watts,
(float) average duration in hours,
        (float) standard deviation of the duration in hours,
(float) average duration of the scheduling window in hours,
        (float) standard deviation of the scheduling window duration in hours]
Methods
-------
__init__ : create object with the parameters for the load generation
read_data : load input data
"""
def __init__(self,ST,ET,T,P,M,NW,NH):
""" Create load_generation object
Parameters
----------
ST (str): start time to generate load data e.g. '2014-01-01 00:00:00'
ET (str): end time to generate load data
T (int): 0=inf; 1=C; 2=Ct
        P (int): percentage upper bound --> e.g. 2 = 200% from the reference
M (int): 1 = single node 2 = multiple nodes
NW (int): number of workers used when generating load in a single node
NH (int): number of homes being generated
"""
self.START_TIME_Q = pd.to_datetime(ST)
self.END_TIME_Q = pd.to_datetime(ET)
self.Queue_type = T
self.P_U_B = P
self.physical_machine = M
self.NUM_WORKERS = NW
self.NUM_HOMES = NH
self.OUT_PUT_FILE_NAME_pre = 'outputdata/multy/'
self.OUT_PUT_FILE_NAME = 'multHDF'
self.OUT_PUT_FILE_NAME_end = '.h5'
self.OUT_PUT_FILE_NAME_summary_pre = 'outputdata/summary/'
self.OUT_PUT_FILE_NAME_summary = 'summaryHDF'
#Auxiliary variables
self.TIME_DELT = pd.to_timedelta('0 days 00:01:00')
self.TIME_DELT_FH = pd.to_timedelta('0 days 01:00:00')
self.TIME_DELT_FD = pd.to_timedelta('1 days 00:00:00')
self.base_max = 5000.0
self.base_min = 100.0
#From data
self.ref_load = None
self.DF_A = None
self.DF_ZIP_summer = None
self.DF_ZIP_winter = None
self.DF_ZIP_spring = None
#DEFINITIONS APPLIANCES
self.APP_parameter_list = [0.5,100,500,100,0.5,0.25,6.0,2.0]
def read_data(self,IF='inputdata/'):
""" Load reference load and appliance data
Parameters
----------
IF (str): folder of input data
"""
# Reference Energy
sys_load = pd.read_hdf(IF+'load_data.h5')
sys_load = sys_load['load']
sys_load = sys_load[self.START_TIME_Q:self.END_TIME_Q+self.TIME_DELT_FD]#*1e6 #DATA IS IN HOURS
sys_load = sys_load.resample(self.TIME_DELT_FH).max().ffill()#fix empty locations
scale_min = sys_load[self.START_TIME_Q:self.END_TIME_Q].min()
scale_max = sys_load[self.START_TIME_Q:self.END_TIME_Q].max()
ref = sys_load
ref = self.base_min+((ref-scale_min)/(scale_max-scale_min))*(self.base_max-self.base_min)
ref.name = 'Load [W]'
ref = ref.resample(self.TIME_DELT).max().interpolate(method='polynomial', order=0,limit_direction='forward')
self.ref_load = ref
# ZIP load
self.DF_A = pd.read_csv(IF+'ZIP_appliances.csv')
self.DF_ZIP_summer = pd.read_csv(IF+'ZIP_summer.csv')
self.DF_ZIP_winter = pd.read_csv(IF+'ZIP_winter.csv')
self.DF_ZIP_spring = pd.read_csv(IF+'ZIP_spring.csv')
###########################################
# save data to file
###########################################
def save_HD5(a,b,x):
""" Save the generated load to HDF5 files
Parameters
----------
a (pandas dataframe): complete dataframe
b (pandas dataframe): summary dataframe
x (str): string number of the individual home id
"""
a.to_hdf(LG.OUT_PUT_FILE_NAME_pre+LG.OUT_PUT_FILE_NAME+x+LG.OUT_PUT_FILE_NAME_end, key=x,format='table',mode='w',dropna = True)
b.to_hdf(LG.OUT_PUT_FILE_NAME_summary_pre+LG.OUT_PUT_FILE_NAME_summary+x+LG.OUT_PUT_FILE_NAME_end, key=x,format='table',mode='w',dropna = True)
return None
###########################################
#APPLIANCES season
###########################################
def makeAPP(DF_A,DF_ZIP_summer,DF_ZIP_winter,DF_ZIP_spring,APP_P_L):
""" Generate individual appliances set for homes during the season of the year
Parameters
----------
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
Returns
----------
    APP_L_obj (list of appliance objects): appliance object lists for the seasons
"""
strataN=4 #from the ZIP study paper
c_index = np.array(DF_ZIP_summer['A_index'])
c_winter = np.array(DF_ZIP_winter.iloc[:,strataN])
c_spring = np.array(DF_ZIP_spring.iloc[:,strataN])
c_summer = np.array(DF_ZIP_summer.iloc[:,strataN])
APP_L_obj = []
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_spring,APP_P_L))
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_summer,APP_P_L))
APP_L_obj.append(A_ZIP.AppSET(DF_A,c_index,c_winter,APP_P_L))
return APP_L_obj
def season(date, HEMISPHERE = 'north'):
    Return the season of the year
Parameters
----------
date (pandas datetime): time being generated
HEMISPHERE (str): north or south hemisphere
Returns
----------
s (int): indicates the season
"""
md = date.month * 100 + date.day
if ((md > 320) and (md < 621)):
s = 0 #spring
elif ((md > 620) and (md < 923)):
s = 1 #summer
elif ((md > 922) and (md < 1223)):
s = 2 #fall
else:
s = 3 #winter
if not HEMISPHERE == 'north':
s = (s + 2) % 3
if s ==2:
s=0 #spring and fall have same loads
if s == 3:
s=2
return s
def SeasonUPdate(temp):
""" Update appliance characteristics given the change in season
Parameters
----------
temp (obj): appliance set object for an individual season
Returns
----------
app_expected_load (float): expected load power in Watts
app_expected_dur (float): expected duration in hours
    appliance_set (list of appliance objects): appliance list for a given season
t_delta_exp_dur (pandas datetime): expected appliance duration
    app_index (array): index for each appliance
"""
app_expected_load = temp.app_expected_load
app_expected_dur = temp.app_expected_dur
appliance_set = temp.appliance_set
t_delta_exp_dur = temp.t_delta_exp_dur
app_index = np.arange(0,len(temp.appliance_set))
return app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index
###########################################
#MAKE QUEUE MODEL C = infinity
###########################################
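# Note on the arrival process used below: appliance start events are generated with a
# time-varying rate lambda(t) = m(t + E[D]) / (E[D] * E[L]), where m is the reference
# load in W, E[D] the expected appliance duration in hours and E[L] the expected
# appliance power in W. Since the expected number of simultaneously active appliances is
# lambda * E[D] and each contributes E[L] W on average, the expected aggregate power
# tracks the reference load. Inter-arrival times are drawn from an exponential
# distribution with that rate; C = infinity means there is no cap on how many
# appliances can be active at once.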
def solverZIPl_inf(x):
""" Generate load with C = infinity
Parameters
----------
x (str): string number of the individual home id
Returns
----------
x (str): string number of the individual home id
"""
START_TIME_Q = LG.START_TIME_Q
END_TIME_Q = LG.END_TIME_Q
ref_load = LG.ref_load
current_time = START_TIME_Q
customer_loads_GL = (ref_load*0.0).copy()
customer_loads_GL_VAR = (ref_load*0.0).copy()
    L1=[];L2=[];L3=[];L4=[];L5=[];L6=[];L7=[];L8=[];L9=[];L10=[];L11=[];L12=[];L13=[];L14=[]
APP_L_obj = makeAPP(LG.DF_A,LG.DF_ZIP_summer,LG.DF_ZIP_winter,LG.DF_ZIP_spring,LG.APP_parameter_list)
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
dates = ref_load.index
while current_time < END_TIME_Q:
m_t_plus_delta = ref_load.asof(where=current_time+t_delta_exp_dur)
lambda_t = m_t_plus_delta / (app_expected_load*app_expected_dur) #lam(t) = m(t + E[D])/(E[D]E[L])
delta_t = np.random.exponential(1.0/lambda_t) #lambda_t is the rate parameter, numpy requires the scale which is the reciprocal of rate. Alternatively can switch the calculation of lambda_t, but this way matches the derived equations.
if delta_t < 0.00000003:
delta_t = 0.00000003
current_time += pd.to_timedelta('%s s' % (delta_t*3600.0)) #converted to seconds as some delta_t in hours was too small for pandas to parse correctly
if current_time < END_TIME_Q: #check after time is updated we are still in sim time
###########################################
#Season
###########################################
app_expected_load,app_expected_dur,appliance_set,t_delta_exp_dur,app_index = SeasonUPdate(APP_L_obj[season(current_time,'north')])
app = appliance_set[np.random.choice(app_index,size=1,replace=True)[0]]
add_time = current_time
this_app_endtime = add_time + pd.to_timedelta('%s h' % app.duration)
this_app_curtime = add_time
customer_loads_GL[dates.asof(this_app_curtime):dates.asof(this_app_endtime)] += app.power
customer_loads_GL_VAR[dates.asof(this_app_curtime):dates.asof(this_app_endtime)] += app.reactive
L1.append(dates.asof(this_app_curtime))#['start time']=dates.asof(this_app_curtime)
L2.append(pd.to_timedelta('%s h' % app.duration).round('1min'))#['duration']=pd.to_timedelta('%s h' % app.duration).round('1min')
L3.append(app.power)#['power']=app.power
L4.append(app.skedulable)#['skedulable']=app.skedulable
L5.append(pd.to_timedelta('%s h' % app.SWn).round('1min'))#['shifting window -']=pd.to_timedelta('%s h' % app.SWn).round('1min')
L6.append(pd.to_timedelta('%s h' % app.SWp).round('1min'))#['shifting window +']=pd.to_timedelta('%s h' % app.SWp).round('1min')
L7.append(app.reactive)#['reactive']=app.reactive
L8.append(app.Zp)#['Zp']=app.Zp
L9.append(app.Ip)#['Ip']=app.Ip
L10.append(app.Pp)#['Pp']=app.Pp
L11.append(app.Zq)#['Zq']=app.Zq
L12.append(app.Iq)#['Iq']=app.Iq
L13.append(app.Pq)#['Pq']=app.Pq
L14.append(app.indeX)#['indeX']=app.indeX
sagra = pd.DataFrame({'start time': L1,
'duration': L2,
'power': L3,
'skedulable': L4,
'shifting window -': L5,
'shifting window +': L6,
'reactive': L7,
'Zp': L8,
'Ip': L9,
'Pp': L10,
'Zq': L11,
'Iq': L12,
'Pq': L13,
'indeX': L14
})
sagra = sagra[sagra['start time'] >= START_TIME_Q]
sagra = sagra.reset_index(drop=True)
sagra = sagra[sagra['start time'] <= END_TIME_Q]
sagra = sagra.reset_index(drop=True)
customer_loads_GL = customer_loads_GL[START_TIME_Q:END_TIME_Q]
customer_loads_GL_VAR = customer_loads_GL_VAR[START_TIME_Q:END_TIME_Q]
activeANDreactive =
|
pd.DataFrame({'W':customer_loads_GL, 'VAR':customer_loads_GL_VAR})
|
pandas.DataFrame
|
import string
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import util
@pytest.fixture
def test_df():
return pd.DataFrame(
{'col1': range(5),
'col2': range(5, 10)},
index=['a', 'b', 'c', 'd', 'e'])
@pytest.fixture
def choosers():
return pd.DataFrame(
{'var1': range(5),
'var2': range(5, 10),
'var3': ['q', 'w', 'e', 'r', 't'],
'building_id': range(100, 105)},
index=['a', 'b', 'c', 'd', 'e'])
@pytest.fixture
def rates():
return pd.DataFrame(
{'var1_min': [np.nan, np.nan, np.nan],
'var1_max': [1, np.nan, np.nan],
'var2_min': [np.nan, 7, np.nan],
'var2_max': [np.nan, 8, np.nan],
'var3': [np.nan, np.nan, 't'],
'probability_of_relocating': [1, 1, 1]})
def test_apply_filter_query(test_df):
filters = ['col1 < 3', 'col2 > 6']
filtered = util.apply_filter_query(test_df, filters)
expected = pd.DataFrame(
{'col1': [2], 'col2': [7]},
index=['c'])
|
pdt.assert_frame_equal(filtered, expected)
|
pandas.util.testing.assert_frame_equal
|
#!/usr/bin/env python3
import time
import math
import subprocess
import sys
import random
import webbrowser
import numpy as np
from datetime import datetime
from dateutil import tz
import pytz
import os
from influxdb import InfluxDBClient
import operator
import copy
from collections import Counter
from scipy.stats import norm
from scipy.special import softmax
import pandas as pd
import matplotlib.pyplot as plt
import re
import tsfel
# get_ipython().system(' pip install tsfel # installing TSFEL for feature extraction')
def str2bool(v):
return v.lower() in ("true", "1", "https", "load")
def mac_to_int(mac):
res = re.match('^((?:(?:[0-9a-f]{2}):){5}[0-9a-f]{2})$', mac.lower())
if res is None:
raise ValueError('invalid mac address')
return int(res.group(0).replace(':', ''), 16)
def int_to_mac(macint):
# if type(macint) != int:
# raise ValueError('invalid integer')
newint = int(macint)
return ':'.join(['{}{}'.format(a, b)
for a, b
in zip(*[iter('{:012x}'.format(newint))]*2)])
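# Round-trip sketch (MAC address taken from the Grafana example URL further below):
#   int_to_mac(mac_to_int('b8:27:eb:6c:6e:22')) == 'b8:27:eb:6c:6e:22'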
# This function converts the time string to epoch time xxx.xxx (second.ms).
# Example: time = "2020-08-13T02:03:00.200", zone = "UTC" or "America/New_York"
# If time = "2020-08-13T02:03:00.200Z" is in UTC, call timestamp = local_time_epoch(time[:-1], "UTC"); the [:-1] slice removes the trailing 'Z' from the string
def local_time_epoch(time, zone):
local_tz = pytz.timezone(zone)
localTime = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S.%f")
local_dt = local_tz.localize(localTime, is_dst=None)
# utc_dt = local_dt.astimezone(pytz.utc)
epoch = local_dt.timestamp()
# print("epoch time:", epoch) # this is the epoch time in seconds, times 1000 will become epoch time in milliseconds
# print(type(epoch)) # float
return epoch
# This function converts the epoch time xxx.xxx (second.ms) to time string.
# Example: time = "2020-08-13T02:03:00.200", zone = "UTC" or "America/New_York"
def epoch_time_local(epoch, zone):
local_tz = pytz.timezone(zone)
time = datetime.fromtimestamp(epoch).astimezone(local_tz).strftime("%Y-%m-%dT%H:%M:%S.%f")
return time
# This function converts the grafana URL time to epoch time. For example, given the URL below:
# https://sensorweb.us:3000/grafana/d/OSjxFKvGk/caretaker-vital-signs?orgId=1&var-mac=b8:27:eb:6c:6e:22&from=1612293741993&to=1612294445244
# 1612293741993 means epoch time 1612293741.993; 1612294445244 means epoch time 1612294445.244
def grafana_time_epoch(time):
return time/1000
def influx_time_epoch(time):
return time/10e8
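# Example conversions (epoch values taken from the URL in the comment above):
#   grafana_time_epoch(1612293741993)       -> 1612293741.993  (Grafana URLs carry milliseconds)
#   influx_time_epoch(1612293741993000000)  -> 1612293741.993  (assuming InfluxDB returns
#                                                               nanosecond epoch timestamps)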
def generate_waves(fs, fd, seconds):
# fs = 10e3
# N = 1e5
N = fs * seconds
amp = 2*np.sqrt(2)
# fd = fs/20
noise_power = 0.001 * fs / 2
time = np.arange(N) / fs
signal = amp*np.sin(2*np.pi*fd*time)
# signal += amp*np.sin(2*np.pi*(fd*1.5)*time)
signal += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
return signal, time
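# Usage sketch with illustrative parameters: a 2-second noisy 5 Hz sine sampled at 100 Hz.
#   signal, t = generate_waves(fs=100, fd=5, seconds=2)   # 200 samples, t in seconds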
def select_with_gaussian_distribution(data_name, all_data, label_indexes = [('S', -2)]):
'''
    label_indexes is one of the following:
[('H', -4)], [('R', -3)], [('S', -2)], [('D', -1)]
'''
index = label_indexes[0][1]
data = all_data[:,index]
values = list(set(data))
num_sample = int(0.8*len(data))
    ### Gaussian Distribution
mu = np.mean(data)
sigma = np.std(data)
n, bins = np.histogram(data, bins=len(values)-1, density=1)
y = norm.pdf(bins, mu, sigma)
    ### calculate probability
p_table = np.concatenate((np.array(values)[np.newaxis,:],y[np.newaxis,:]),0)
p_list = []
for each_ in data:
index = np.where(p_table[0,:]==each_)[0][0]
p_list.append(p_table[1,index])
# import pdb; pdb.set_trace()
p_arr = softmax(p_list)
sample_index = np.random.choice(np.arange(data.shape[0]), size=num_sample, p=p_arr)
result_data = all_data[sample_index]
print(f"\nPerformed data selection with gaussian distribution on {data_name}\n")
return result_data
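# Note: the sampling probabilities are the softmax of the Gaussian pdf evaluated at each
# row's label value, so rows whose label lies near the mean are drawn more often
# (sampling is with replacement, the numpy default).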
def select_with_uniform_distribution(data_name, all_data,label_indexes = [('S', -2)]):
'''
    label_indexes is one of the following:
[('H', -4)], [('R', -3)], [('S', -2)], [('D', -1)]
'''
index = label_indexes[0][1]
data = all_data[:,index]
minimum_num_sample = int(len(data)/len(list(set(data))))
counter_result = Counter(data)
dict_counter = dict(counter_result)
result_dict = dict_counter.copy()
# we keep them even if they have less than minimum
# for item in dict_counter.items():
# if item[1] < minimum_num_sample:
# del result_dict[item[0]]
keys = list(result_dict.keys())
result_index_list = []
for each_key in keys:
result_index_list += list(np.random.choice(np.where(data == each_key)[0], size=minimum_num_sample))
result_data = all_data[result_index_list]
print(f"\nPerformed data selection with uniform distribution on {data_name}\n")
return result_data
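# Note: each distinct label value contributes the same number of rows,
# int(len(data) / number_of_distinct_values), sampled with replacement.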
def label_index(label_name, labels_list = ['ID', 'Time', 'H', 'R', 'S', 'D']):
# print(labels_list.index(label_name))
return labels_list.index(label_name)-len(labels_list)
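# With the default labels_list (6 entries) the returned index is negative and counts
# from the end of a data row, e.g. label_index('S') -> -2 and label_index('Time') -> -5.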
def eval_data_stats(data_set_name, data_set, labels_list = ['ID', 'Time', 'H', 'R', 'S', 'D'], show=False ):
num_labels = len(labels_list)
time_index = label_index ('Time', labels_list)
time_min = epoch_time_local(min(data_set[:, time_index]), "America/New_York")
time_max = epoch_time_local(max(data_set[:, time_index]), "America/New_York")
print(f"\n{data_set_name} statistics:")
print(f"time_min: {time_min}; time_max: {time_max}")
print(f"{num_labels} labels are: {labels_list}")
for label_name in labels_list[2:]:
index = label_index (label_name, labels_list)
target_ = data_set[:,index]
target_pd =
|
pd.DataFrame(target_,columns=[label_name])
|
pandas.DataFrame
|
"""
##############################################################################
#
# Calculate motif's activity differences Zscores
#
# AUTHOR: Maciej_Bak
# AFFILIATION: University_of_Basel
# AFFILIATION: Swiss_Institute_of_Bioinformatics
# CONTACT: <EMAIL>
# CREATED: 20-01-2020
# LICENSE: Apache_2.0
#
##############################################################################
"""
# imports
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
import pandas as pd
import numpy as np
def parse_arguments():
"""Parser of the command-line arguments."""
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
default="ERROR",
help="Verbosity/Log level. Defaults to ERROR",
)
parser.add_argument(
"-l", "--logfile", dest="logfile", help="Store log to this file."
)
parser.add_argument(
"--activity-table",
dest="activity_table",
required=True,
help="Path to the table with motifs activities and their stds.",
)
parser.add_argument(
"--design-table",
dest="design_table",
required=True,
help="Path to the design table.",
)
parser.add_argument(
"--outfile",
dest="outfile",
required=True,
help="Path for the output table with Z-scores.",
)
return parser
##############################################################################
def calculate_Zscores(A_b, design_table):
"""
Calculate Zscores as the ratio: Act.Diff / Act.Diff.Std.
"""
cols_list = []
for s in design_table.index.values:
cols_list.append("A_" + s)
for s in design_table.index.values:
cols_list.append("stdA_" + s)
Zscores_df = pd.DataFrame(index=A_b.index.values)
for c in cols_list:
Zscores_df[c] = A_b[c]
# calculate per-sample activity Z-scores
for s in design_table.index.values:
Zscores_df["zscores_" + s] = Zscores_df["A_" + s] / Zscores_df["stdA_" + s]
cols_list.append("zscores_" + s)
    # calculate motif Z-scores
N_samples = len(design_table.index.values)
for s in design_table.index.values:
Zscores_df["Z^2_" + s] = Zscores_df["zscores_" + s] * Zscores_df["zscores_" + s]
Zscores_df["sum_Z^2"] = 0.0
for s in design_table.index.values:
Zscores_df["sum_Z^2"] += Zscores_df["Z^2_" + s]
Zscores_df["avg_Z^2"] = Zscores_df["sum_Z^2"] / N_samples
Zscores_df["combined.Zscore"] = np.sqrt(Zscores_df["avg_Z^2"])
# rename the columns
new_cols_list = []
for c in cols_list:
c_prefix = c.split("_")[0]
c_suffix = "_".join(c.split("_")[1:])
if c_prefix == "A":
new_cols_list.append("activities_" + c_suffix)
if c_prefix == "stdA":
new_cols_list.append("deltas_" + c_suffix)
if c_prefix == "zscores":
new_cols_list.append("zscores_" + c_suffix)
# select columns for final table:
cols_list.append("combined.Zscore")
new_cols_list.append("combined.Zscore")
Zscores_df = Zscores_df[cols_list].copy()
Zscores_df.columns = new_cols_list
return Zscores_df
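# In short: for each motif the function computes z_s = A_s / std(A_s) per design-table
# sample s and reports combined.Zscore = sqrt(mean_s(z_s^2)) over the N samples.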
def main():
"""Main body of the script."""
# read the design table
design_table = pd.read_csv(options.design_table, sep="\t", index_col=0)
# read the table with activities and their std
A_b_table = pd.read_csv(options.activity_table, sep="\t")
A_b_table_fraction = A_b_table["fraction"].copy()
del A_b_table["fraction"]
# select motifs for which the model was not run
not_fitted = A_b_table.loc[(A_b_table == 0).all(axis=1)].index.values
# select a subtable containing motifs with activities fitted
A_b_table = A_b_table.loc[~(A_b_table == 0).all(axis=1)]
# calculate Z-scores
Zscores_df = calculate_Zscores(A_b_table, design_table)
    # add the rows for motifs that were not fitted
not_fitted_df = pd.DataFrame(
np.nan, index=not_fitted, columns=Zscores_df.columns.values
)
Zscores_df =
|
pd.concat([Zscores_df, not_fitted_df], axis=0)
|
pandas.concat
|