prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime  # rebinds 'datetime' to the module; the code below uses datetime.datetime(...)
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model' ##-->> SET TO 'GIS_Model' TO USE THE GIS MODEL
resolucion = 'diaria' ##-->> THE OPTIONS ARE 'diaria' (daily) OR 'horaria' (hourly)
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
'For adjusting the dates in the Kumar model every 10 min. The start and end dates are strings in the format %Y-%m-%d.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
def serie_Kumar_Model_hora(estacion):
'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, produced by <NAME> ' \
'for the AMVA and their thesis. The original dataframe has its data ordered into 12 ascending months (2018), although they ' \
'may belong to different years. The result is for the selected point, using the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
'Obtains the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of ' \
'days at a point with a given latitude and longitude ("lat"/"lon" as floats), starting from a start date ' \
'"start" such as datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
# Calculate radiation every hour for n days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
print('Teorica con el modelo de KUMAR')
###############################################################################
##--------------THEORETICAL EFFICIENCIES AS A PROXY FOR TRANSPARENCY----------##
###############################################################################
'Computation of the theoretical efficiencies as a proxy for the transparency of the atmosphere.'
'This makes use of the pyranometer information and of the theoretical radiation'
'of <NAME>; with this we intend to obtain the characteristics that derive'
'from the stochastic analysis, similar to that of <NAME> in their doctoral thesis.'
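# --- Hedged sketch (not part of the original pipeline) ----------------------------------
# The transparency proxy described above is just the ratio of measured to theoretical
# hourly radiation. The pyranometer column name 'radiacion' is an assumption about the
# layout of df1, so the block is guarded and becomes a no-op if that column is absent.
if Theoric_rad_method == 'GIS_Model':
    obs = df1.set_index(df1.columns[2])  # column 2 was parsed as datetime when reading df1
    if 'radiacion' in obs.columns:
        obs_hourly = obs['radiacion'].groupby(pd.Grouper(freq="H")).mean()
        transparencia = obs_hourly / Io_hora['Radiacion_Teo']  # values near 1 = clear sky
        transparencia = transparencia[(transparencia > 0) & (transparencia < 1.5)]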
##------------------READING THE EXPERIMENT DATA-------------------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = | pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0) | pandas.read_csv |
from world_viewer.world import World
import pandas as pd
import numpy as np
import warnings
# from sensible_raw.loaders import loader  # required by load_world() when the 'smoking'/'physical' surveys are read
import json
from math import ceil
import os
os.environ['R_HOME'] = '/home/<EMAIL>/master/lib/R'
class CNSWorld(World):
PICKLE_PATH = './pickle/' # path for cached data
RELATION_NET_PICKLE = 'CNS_relation_net'
OPINIONS_PICKLE = 'CNS_opinions'
LIKE_MINDEDNESS_PICKLE = 'CNS_like_mindedness'
CNS_TIME_BEGIN = pd.Timestamp(pd.datetime(2013, 9, 2)) # first timestamp
CNS_TIME_END = pd.Timestamp(pd.datetime(2014, 12, 31)) # last timestamp
sigma = pd.to_timedelta(3, unit='d').total_seconds()
two_sigma_sqr = 2* sigma * sigma
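# Note on the two constants above: _calc_interaction() uses them as a Gaussian kernel,
# w(dt) = exp(-dt**2 / (2 * sigma**2)) with sigma = 3 days (in seconds), so an encounter
# 3 days in the past still contributes with weight exp(-0.5) ~ 0.61, while a
# lower-triangular mask keeps only encounters at or before each time step.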
def __init__(self, path='', start=pd.datetime(2013, 9, 2), end=pd.datetime(2014, 12, 31)):
super().__init__()
self.path = path
self.CNS_TIME_BEGIN = start
self.CNS_TIME_END = end
def load_world(self, opinions = ['smoking'], relation_agg = 2, read_cached = False, stop=False, write_pickle = True, continous_op = False):
self.name = "CNS" + '-'.join(opinions)
self.type = "CNS"
if continous_op:
warnings.warn("No comparison of continuous opinions implemented yet!")
pickle_relation_net_filename = self.RELATION_NET_PICKLE \
+ "_" + str(relation_agg) \
+ ".pkl"
pickle_opinions_filename = self.OPINIONS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
pickle_like_mindedness_filename = self.LIKE_MINDEDNESS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
## 0. Load time
#time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='W-MON'),columns=['time'])
time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='d'),columns=['time'])
self.time = time
## 1. Load opinions
if read_cached:
opinions_cached = False
try:
op_nodes = pd.read_pickle(self.PICKLE_PATH + pickle_opinions_filename)
opinions_cached = True
except FileNotFoundError:
warnings.warn("No cached opinions found, read opinions from file.")
opinions_cached = False
if not (read_cached and opinions_cached):
op_nodes = pd.DataFrame() # general opinion dataframe
if len(list(set(opinions) & set(["smoking","physical"]))) > 0:
op_data = pd.DataFrame() # df for loaded data
# load data
for survey in np.arange(1,4):
print('Load survey ' + str(survey))
data_s = loader.load_data("questionnaires", "survey_"+str(survey), as_dataframe=True)
data_s = data_s[data_s.user < 1000] #clean strange users
op_time = self._get_op_time(survey)
data_s = data_s.set_index('user').join(op_time)
data_s = data_s[data_s.time.astype('int') > 10]
data_s[data_s.time < self.CNS_TIME_BEGIN] = self.CNS_TIME_BEGIN
data_s[data_s.time > self.CNS_TIME_END] = self.CNS_TIME_END
data_s['survey'] = survey
data_s.reset_index(inplace=True)
op_data = pd.concat([op_data,data_s],sort=False)
# users may have filled out more than one questionnaire in the same week
op_data.drop_duplicates(['user','time','variable_name'], keep='last', inplace=True)
# process opinions
for opinion in opinions:
# load smoking opinions
if opinion == "smoking":
print("Process opinion data for variable: smoking")
opinion = "op_" + opinion
smoking = op_data[op_data.variable_name == b'smoke_freq'].copy()
smoking[opinion] = (smoking.response != b'nej_jeg_har_aldrig_r') \
& (smoking.response != b'nej_men_jeg_har_rget')
smoking.reset_index(inplace=True)
smoking = smoking[['user', 'time', opinion, 'survey' ]]
smoking.rename(columns={'user':'node_id'},inplace=True)
smoking = self._add_time_to_op_nodes(smoking, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = smoking
else:
op_nodes = op_nodes.set_index(['node_id','time']).join(smoking.set_index(['node_id','time']), how='outer')
op_nodes.reset_index(inplace=True)
# load physical opinions
elif opinion == "physical":
print("Process opinion data for variable: physical")
opinion = "op_" + opinion
physical = op_data[op_data.variable_name == b'physical_activity'].copy()
physical.response.replace(b'ingen',0,inplace=True)
physical.response.replace(b'ca__time_om_ugen',0,inplace=True)
physical.response.replace(b'ca_1-2_timer_om_ugen',1,inplace=True)
physical.response.replace(b'ca_3-4_timer_om_ugen',2,inplace=True)
physical.response.replace(b'ca_5-6_timer_om_ugen',3,inplace=True)
physical.response.replace(b'7_timer_om_ugen_elle',4,inplace=True)
physical.rename(columns={'response':opinion, 'user':'node_id'},inplace=True)
physical = physical[['node_id', 'time', opinion, 'survey' ]]
physical = self._add_time_to_op_nodes(physical, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = physical
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(physical.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
elif opinion == "fitness":
print("Process opinion data for variable: fitness")
opinion = "op_" + opinion
fitness = pd.read_pickle('data/op_fitness.pkl').reset_index()
fitness = fitness[['node_id','time','op_fitness_abs']]
fitness = fitness.rename(columns={"op_fitness_abs":"fitness"})
fitness["op_fitness"] = 0
fitness.sort_values(['node_id', 'time'], inplace=True)
fitness = fitness[fitness.time >= self.CNS_TIME_BEGIN]
fitness = fitness[fitness.time <= self.CNS_TIME_END]
fitness.set_index('node_id', inplace=True)
fitness.reset_index(inplace=True)
# discretize opinion
fitness.loc[fitness.fitness >= 1, "op_fitness"] = True
fitness.loc[fitness.fitness < 1, "op_fitness"] = False
# write into general dataframe
if op_nodes.empty:
op_nodes = fitness
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(fitness.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
else:
raise ValueError('The opinion "' + opinion + '" is unknown.')
if write_pickle: op_nodes.to_pickle(self.PICKLE_PATH + pickle_opinions_filename)
#save opinions as instance variable
self.op_nodes = op_nodes
if stop: return 0
## 3. Load relation network
relations = pd.read_pickle("data/relations.pkl")
relations.reset_index(inplace=True)
relations = relations[relations.time >= self.CNS_TIME_BEGIN]
relations = relations[relations.time <= self.CNS_TIME_END]
# take only nodes for which the opinion is known
relations = relations[relations.id_A.isin(self.op_nodes.node_id)]
relations = relations[relations.id_B.isin(self.op_nodes.node_id)]
self.a_ij = relations[['id_A', 'id_B', 'time', 'edge']]
def _get_op_time(self, survey):
with open('user_scores'+str(survey)+'.json') as f:
op_time = json.load(f)
op_time = pd.DataFrame(op_time).loc['ts'].to_frame()
op_time.index.name = 'user'
op_time.reset_index(inplace=True)
op_time.user = op_time.user.astype('int')
op_time.set_index('user',inplace=True)
op_time.rename(columns={'ts':'time'},inplace=True)
op_time.time = pd.to_datetime(op_time.time, unit='s').dt.to_period('W').dt.to_timestamp()
return op_time
def load_edges_from_bluetooth2(self, proxi, time, verbose=True): #, threshold = None, verbose=True):
proxi = proxi.copy()
# take both directions id_A->id_B, id_B->id_A
proxi_inv = proxi.rename(columns={'id_A':'id_B','id_B':'id_A'})
proxi = pd.concat([proxi, proxi_inv], sort=False)
proxi.drop_duplicates(['id_A','id_B','time'],inplace=True)
# don't count edges twice
proxi = proxi[proxi.id_A < proxi.id_B]
proxi.time = proxi.time.dt.round('D')
# count encounters per day
proxi['encounter'] = 1
proxi = proxi.groupby(['id_A','id_B','time']).encounter.sum().reset_index()
print("before")
print(proxi)
#insert time steps with no recorded encounter
proxi = proxi.groupby(['id_A','id_B'])[['time','encounter']] \
.apply( lambda p: \
pd.DataFrame(p).set_index(['time']).join(time.set_index(['time']), how='outer') \
)
proxi.reset_index(inplace=True)
# fill unknown encounters with 0
proxi.fillna(0,inplace=True)
print("after")
print(proxi)
# weighted sum over a week
proxi = proxi.groupby(['id_A','id_B'])[['time','encounter']].apply(self._calc_interaction)
proxi.reset_index(inplace=True)
proxi.time = pd.to_datetime(proxi.time, unit='s')#.dt.to_period('W').dt.to_timestamp()
#proxi = proxi.groupby(['id_A','id_B','time']).mean()
self.meetings = proxi.reset_index()
#determine edges
#if threshold:
# proxi['edge'] = proxi.encounter > threshold
# print("Use a_ij threshold: " + threshold)
#else:
# proxi['edge'] = proxi.encounter > proxi.encounter.describe()['25%']
return proxi.reset_index()
def _calc_interaction(self,proxi_slice):
proxi = proxi_slice.copy()
proxi.time = proxi.time.astype('int')/1000000000.0 # to seconds
time_matrix = np.array([proxi.time.values]*len(proxi.time))
diff = time_matrix - time_matrix.transpose()
matrix = np.exp(-(diff * diff)/self.two_sigma_sqr)
filter_past = np.tril(np.ones_like(diff))
matrix *= filter_past
proxi.encounter = np.dot(matrix, proxi.encounter)
return proxi.set_index('time')
def load_edges_from_bluetooth(self, proxi, encounter_offset, freq = 'weekly', time_format = 'ms'):
proximity = proxi.copy()
proximity['encounter'] = 1
if freq == 'monthly':
# convert time to datetime format
proximity.time = pd.to_datetime(proximity.time, unit=time_format)
# aggregate monthly
proximity = proximity.groupby(['id_A','id_B', pd.Grouper(key='time', freq='M')]).encounter \
.sum() \
.reset_index() \
.sort_values('time')
elif freq == 'weekly':
# subtract 6 days, because pd.Grouper labels weekly bins unexpectedly
proximity.time = pd.to_datetime(proximity.time, unit=time_format) - | pd.to_timedelta(6, unit='d') | pandas.to_timedelta |
import requests, re, json, csv
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
confirmed_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'
deaths_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv'
recovered_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv'
confirmed_total_data = []
deaths_total_data = []
recovered_total_data = []
with requests.Session() as s:
download = s.get(confirmed_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
confirmed_total_data.append(row)
with requests.Session() as s:
download = s.get(deaths_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
deaths_total_data.append(row)
with requests.Session() as s:
download = s.get(recovered_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
recovered_total_data.append(row)
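# The three download blocks above repeat the same logic; a small helper such as the
# (hypothetical) fetch_csv_rows below could replace them. It is shown only as a sketch
# and is not used by the rest of this script.
def fetch_csv_rows(url):
    """Download a CSV file and return its rows as a list of lists."""
    with requests.Session() as session:
        decoded = session.get(url).content.decode('utf-8')
    return list(csv.reader(decoded.splitlines(), delimiter=','))
# e.g. confirmed_total_data = fetch_csv_rows(confirmed_CSV_URL)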
# confirmed_total_data[0]
confirmed_df = pd.DataFrame(confirmed_total_data[1:], columns=confirmed_total_data[0])
deaths_df = pd.DataFrame(deaths_total_data[1:], columns=deaths_total_data[0])
recovered_total_data = | pd.DataFrame(recovered_total_data, columns=recovered_total_data[0]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from scipy import stats
from statsmodels.formula.api import ols
import argparse
import os
import glob
import warnings
import config_utils
from functools import reduce
import random
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
# Which params were sampled logarithmically?
plot_logarithmic = ['learning_rate', 'weight_decay']
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--logdir', type=str, help='directory containing log files', default=None)
parser.add_argument('-c', '--csv', type=str, help='csv file from previously read log files', default=None)
parser.add_argument('-i', '--id', type=str, help='Identifier for this analysis and subdirectory to output results to.', default='temp')
parser.add_argument('-r', '--recursive', action='store_true', help='to also read any log files in sub*directories of --log')
parser.add_argument('-p', '--showplots', action='store_true', help='to show each plot prior to saving and continuing')
parser.add_argument('-t', '--histories', action='store_true', help='to plot loss histories (warning: requires slow rereading of log files).')
parser.add_argument('-f', '--full_samples', action='store_true', help='to compute results for all runs of each group; otherwise subsample for fair comparison.')
parser.add_argument('-s', '--score', default='macro_f1', help='validation score of interest', choices=['acc', 'macro_f1', 'macro_f1_main'])
maxima_of_interest = ['val_loss', 'val_acc', 'val_macro_f1', 'val_macro_f1_main', 'train_acc', 'train_macro_f1', 'train_macro_f1_main']
groupings_of_interest = [
['static', 'dynamic'],
['static', 'weights', 'noweights'],
['weights', 'noweights'],
['valueweights', 'valuezeros'],
['dot', 'cos', 'mlp'],
['softmax', 'nosoftmax'],
['keys', 'nokeys'],
['noweights_nokeys', 'weights_nokeys'],
] # Which main categories to compare
hyperparams_of_interest = {
# 'all': [
# 'entity_library',
# 'gate_type',
# 'entlib_weights',
# 'gate_nonlinearity',
# 'gate_sum_keys_values',
# 'gate_softmax',
# 'entlib_normalization',
# ],
# 'noweights': [
# 'gate_type',
# 'gate_nonlinearity',
# 'gate_softmax',
# 'entlib_normalization',
# ],
# 'noweights_nokeys': [
# 'gate_type',
# 'gate_nonlinearity',
# 'gate_softmax',
# 'entlib_normalization',
# ],
# 'weights_nokeys': [
# 'gate_type',
# 'gate_nonlinearity',
# 'gate_softmax',
# 'entlib_normalization',
# ],
# 'dynamic': [
# 'entlib_dim',
# 'gate_type',
# 'entlib_weights',
# 'gate_nonlinearity',
# 'gate_sum_keys_values',
# 'gate_softmax',
# 'entlib_normalization',
# ],
# 'static': [
# 'gate_type',
# 'gate_nonlinearity',
# 'gate_softmax',
# ],
}
num_best_to_plot_individually = 5
num_best_to_plot_jointly_per_value = 30
def _main():
global groupings_of_interest
global hyperparams_of_interest
args = parser.parse_args()
score = 'val_' + args.score
out_path = os.path.join(args.logdir or os.path.dirname(args.csv), args.id)
os.makedirs(out_path, exist_ok=True)
print('Results of analysis will be output to {0}.'.format(out_path))
# Read new results from log directory if given, and write to csv (else read straight from csv):
if args.logdir is not None:
# TODO If the csv file already exists, ask for confirmation before regenerating it (takes long!).
print('Log directory given.')
log_dir = args.logdir
logs = read_logs(log_dir, recursive=args.recursive)
if len(logs) == 0:
print("No complete logs found. Quitting.")
quit()
args.csv = os.path.join(log_dir, 'scores.csv')
write_logs_to_csv(logs, args.csv)
# Read results from csv into dafaframe and organize by group
df = read_csv(args.csv)
dfs = data_groups(df) # dictionary of dataframes
# Remove empty groups from consideration
empty_groups = [group for grouping in groupings_of_interest for group in grouping if len(dfs[group]) == 0]
empty_groups.extend([key for key in hyperparams_of_interest.keys() if len(dfs[key]) == 0])
empty_groups = list(set(empty_groups))
groupings_of_interest = [[x for x in grouping if x not in empty_groups] for grouping in groupings_of_interest]
hyperparams_of_interest = {key: hyperparams_of_interest[key] for key in hyperparams_of_interest.keys() if key not in empty_groups}
if [] in groupings_of_interest:
groupings_of_interest.remove([])
print('groupings_of_interest =', groupings_of_interest)
print('hyperparams_of_interest =', hyperparams_of_interest)
print(' (removed empty groups: {0}.)'.format(', '.join(empty_groups)))
# Output basic stats
write_summary(dfs)
print()
if args.histories:
# Create separate history loss & score plot for best logs
print('Plotting top {0} training histories.'.format(num_best_to_plot_individually))
best_model_names = dfs['all'].nlargest(num_best_to_plot_individually, score)['model_name']
best_log_paths = ['/logs/'.join(os.path.split(name)) + '.log' for name in best_model_names]
best_logs = read_logs(best_log_paths)
for i, log in enumerate(best_logs):
plot_loss_and_acc_history(log, out_path, score=score, index=i, show_plot=False)
print()
# Create boxplot of mean score per group
print('Plotting boxplots of {0}.'.format(', '.join([str(grouping) for grouping in groupings_of_interest])))
for grouping in groupings_of_interest:
# Equal number of models for each group of interest (for fair comparison)
print(grouping, [len(dfs[key]) for key in grouping])
dfs_equalized_by_group = {}
min_sample_size = min([len(dfs[key]) for key in grouping])
for key in grouping:
if len(dfs[key]) > min_sample_size:
print(' Subsampling {0} from {1} runs in group {2}'.format(min_sample_size, len(dfs[key]), key))
dfs_equalized_by_group[key] = dfs[key].sample(min_sample_size)
else:
dfs_equalized_by_group[key] = dfs[key]
boxplot_means(dfs if args.full_samples else dfs_equalized_by_group, grouping, out_path, best_per_value=30, score=score, axes_ylim=None, show_plot=args.showplots)
print()
if args.histories:
print('Plotting history plots of {0} given specified hyperparams.'.format(', '.join(hyperparams_of_interest.keys())))
# For each group, each parameter of interest for that group, draw all histories in single plot:
for group in hyperparams_of_interest.keys():
for param in hyperparams_of_interest[group]:
# See if param_name should be plotted as categorical or continuous:
plot_categorical = False
unique_values = list(dfs[group][param].unique())
if isinstance(unique_values[0], str) or isinstance(unique_values[0], np.bool_):
plot_categorical = True
if None in unique_values: unique_values.remove(None)
if 'None' in unique_values: unique_values.remove('None')
if len(unique_values) > 1:
print(' Plotting', group, 'given', param, unique_values, '(categorical)' if plot_categorical else '(continuous)')
# If categorical, first downsample to equally many samples for each param value
if plot_categorical:
dfs_per_value = [dfs[group].loc[df[param] == value] for value in unique_values]
if plot_categorical and not args.full_samples:
min_size = min([len(d) for d in dfs_per_value])
dfs_equalized_by_value = [d.sample(min_size) for d in dfs_per_value]
dfs_to_plot = dfs_equalized_by_value
else:
dfs_to_plot = [dfs[group]]
# Then optionally take only top N best runs:
if plot_categorical and num_best_to_plot_jointly_per_value:
dfs_to_plot = [df.nlargest(num_best_to_plot_jointly_per_value, score) for df in dfs_to_plot]
model_names_per_value = [df['model_name'] for df in dfs_to_plot]
# Plotting function wants a plain list of logs:
model_names_to_plot = [model_name for model_names in model_names_per_value for model_name in model_names]
log_paths_to_plot = ['/logs/'.join(os.path.split(name)) + '.log' for name in model_names_to_plot]
logs_to_plot = read_logs(log_paths_to_plot)
plot_history_per_hyperparam(logs_to_plot, param, out_path, score=score, plot_categorical=plot_categorical, show_plot=args.showplots, id=group)
# plt.hist([dfs['static']['val_macro_f1'], dfs['noentlib']['val_macro_f1']])
# plt.show()
def read_logs(log_path, recursive=True):
if isinstance(log_path, str):
if recursive:
log_paths = glob.glob(os.path.join(log_path, '**', '*.log'), recursive=True)
else:
log_paths = glob.glob(os.path.join(log_path, '*.log'), recursive=False)
else:
log_paths = log_path # i.e., it's already a list of paths.
print(' Reading {0} log files...'.format(len(log_paths)))
# Read all logs into dataframes, collect only those that are non-empty.
logs = []
for log_name in log_paths:
log = _read_log(log_name)
if len(log.index) != 0 and (len(log.folds) == log.params['folds']):
logs.append(log)
return logs
def write_logs_to_csv(logs, csv_path):
print('Writing log file summaries to', csv_path + '.')
# Store the settings and maxima from ALL logs in scores.csv file:
with open(csv_path, "w", encoding="utf-8") as outf:
param_names = list(logs[0].params.keys())
score_names = list(logs[0].maxima.keys())
param_names.sort()
score_names.sort()
print(','.join(param_names)+','+','.join(score_names) + ',model_name', file=outf)
for log in logs:
values = [str(log.params[param]) for param in param_names]
maxima = [str(log.maxima[score]) for score in score_names]
print(','.join(values) + ',' + ','.join(maxima) + ',' + log.log_name[:-4].replace('/logs/', '/'), file=outf)
outf.close()
def read_csv(csv_path):
print('Reading results from', csv_path + '.')
df = | pd.read_csv(csv_path) | pandas.read_csv |
import pandas as pd
from product.anaiproduct import AnAIProduct
from datetime import timedelta
import pytz
from tqdm import tqdm
pd.options.mode.chained_assignment = None
from modeler.modeler import Modeler as m
from datetime import datetime, timedelta, timezone
import numpy as np
import math
import pickle
from sklearn.preprocessing import OneHotEncoder
class StockCategory(AnAIProduct):
def __init__(self,params):
super().__init__("stock_category",
{"market":{"preload":True,"tables":{"prices": | pd.DataFrame([{}]) | pandas.DataFrame |
from .card import Card
from copy import deepcopy
from pandas import DataFrame
location_in_cell_dict = {
'references':[],
'location':[],
'note':None,
}
def is_location_in_cell_dict(item):
output = False
if type(item) is dict:
if set(location_in_cell_dict)==set(item):
output = True
return output
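# Illustrative only: a dict is recognised when its keys match location_in_cell_dict exactly,
# e.g. is_location_in_cell_dict({'references': [], 'location': [], 'note': None}) -> True
# while is_location_in_cell_dict({'references': []}) -> False.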
class LocationInCellCard(Card):
def __init__(self, item=None):
super().__init__()
self.card_type = 'location in cell'
if is_location_in_cell_dict(item):
for key, value in item.items():
setattr(self,key,value)
else:
for key, value in location_in_cell_dict.items():
setattr(self, key, value)
def to_dict(self):
output = deepcopy(location_in_cell_dict)
for key in output:
output[key]=getattr(self, key)
return output
def to_pandas_DataFrame(self, with_evidences=True):
aux_dict = self.to_dict()
for key in aux_dict:
aux_dict[key]=[aux_dict[key]]
if not with_evidences:
for key in aux_dict:
try:
aux_dict[key]=aux_dict[key][0].value
except:
continue
df = | DataFrame(aux_dict) | pandas.DataFrame |
import functools
from threading import Thread
from contextlib import contextmanager
import signal
from scipy.stats._continuous_distns import _distn_names
import scipy
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy
wqData = waterQuality.DataModelWQ('rbWN5')
siteNoLst = wqData.siteNoLst
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'All')
dirOut = os.path.join(dirWRTDS, 'output')
dirPar = os.path.join(dirWRTDS, 'params')
# read a temp file
saveFile = os.path.join(dirOut, siteNoLst[0])
dfP = | pd.read_csv(saveFile, index_col=None) | pandas.read_csv |
from ..data import platemap_to_dataframe, scale_plate
import pandas as pd
def read_multiple_plates(tables, read_single, platemap=None, **kwargs):
"""Reads data for one or more plates, then merges the data together.
This function simplifies reading and data reduction where you have either
1. multiple plates, each containing separate samples, and/or
2. each sample has multiple parameters measured (e.g OD600, A450, etc).
This function produces a ``DataFrame`` where each such ``measure`` (e.g.
OD600, FITC, A450, etc.) is in a separate column, and each physical well is
in a single row.
For each entry in ``tables``, this function reads each of the ``measures`` in
that table and joins those measures horizontally (one measure per column);
then it concatenates ``table``\ s vertically, such that there is one row per well.
Each ``dict`` in ``tables`` represents a single plate, which may have multiple
``measures``. Each of the ``measures`` will be read and joined by well. The
union of parameters in each ``measure`` and ``table`` will be passed as
``**kwargs`` to ``read_single``.
Each ``table`` can have several keys which serve special functions. Other
keys will be passed as ``kwargs`` to ``read_single`` as above
* ``measures``: list of dicts, each representing a different variable.
Will be merged with ``table`` (values in the ``measure`` overwrite those
in the ``table``) and passed as ``**kwargs`` to ``read_single``.
* ``platemap``: dict containing platemap metadata that will be passed to
:func:`~microplates.data.platemap_to_dataframe`. The metadata from the ``platemap``
argument and from this key will be merged
* ``transform``: function that will be called with the ``DataFrame`` and ``table``,
and should return a new, possibly modified ``DataFrame``
* ``scale``: tuple ``(from_wells, to_wells)``; will be used to call
:func:`data.scale_plate`
Examples
--------
# single plate, multiple measures (OD600, FITC), each measure is in a
# separate tab of the spreadsheet
>>> read_multiple_plates([
... { 'io': 'plate1.xlsx', 'measures': [
... { 'sheet_name':'OD600', 'measure':'OD600' },
... { 'sheet_name':'FITC', 'measure':'FITC' }
... ]}
... ], read_single = pd.read_excel )
# multiple plates, in separate excel files
>>> read_multiple_plates([
... { 'io': 'plate1.xlsx', 'measure':'OD600', 'data': {'plate':1} },
... { 'io': 'plate2.xlsx', 'measure':'OD600', 'data': {'plate':2} }
... ], read_single = pd.read_excel )
# multiple plates in different tabs of the same excel file
>>> read_multiple_plates([
... { 'sheet_name': 'plate1', 'measure':'OD600', 'data': {'plate':1} },
... { 'sheet_name': 'plate2', 'measure':'OD600', 'data': {'plate':2} }
... ], read_single = pd.read_excel, io='plates.xlsx', measure='OD600' )
# multiple plates in same excel file; can read using a function from
# a submodule of microplates.io:
>>> read_multiple_plates([
... { 'sheet_name': 'plate1', 'measure':'OD600', 'data': {'plate':1} },
... { 'sheet_name': 'plate2', 'measure':'OD600', 'data': {'plate':2} }
... ],
... read_single=microplates.io.tecan.read_single,
... path='plates.xlsx', measure='OD600' )
Parameters
----------
tables : list of dicts
See examples
read_single : function
Function to read a single plate. Generally will be a function from
the `io` submodule. The values for a single `measure` or `table` will
be used as `**kwargs` for `read_single`
platemap : dict
Platemap; will be evaluated by `data.platemap_to_dataframe` and joined
to each `table`
**kwargs : dict, optional
Additional arguments will be merged into each ``table``, with values
from the ``table`` overwriting those in ``**kwargs``.
Returns
-------
pandas.DataFrame
One row per well, containing one column per ``measure`` together with the platemap metadata.
"""
dfs = []
special_keys = set(["data","measures","transform","platemap","convert","scale"])  # 'scale' is handled below and must not reach read_single
if platemap is None:
platemap = {}
platemap = platemap_to_dataframe(platemap)
# for each file
for table in tables:
table = {**kwargs, **table}
# extract metadata to add as constant column
if "data" in table:
table_metadata = table["data"]
else:
table_metadata = {}
# if multiple tables are included in the file
if "measures" in table:
measures = table["measures"]
else:
measures = [table]
# if there is a function to modify this table, extract it
if "transform" in table:
transform = table["transform"]
else:
transform = None
# if there is a per-table platefile, grab it
if "platemap" in table:
table_platemap = table["platemap"]
else:
table_platemap = {}
table_platemap = platemap_to_dataframe(table_platemap)
# if instructions to broadcast the per-table mapfile from
# one microplate shape to another (e.g. 96 to 384), do the conversion
if "scale" in table:
convert_from, convert_to = table["scale"]
table_platemap = scale_plate(table_platemap, convert_from, convert_to)
table = {x: table[x] for x in table if x not in special_keys}
# for each table in the file
measure_dfs = []
for measure in measures:
measure_df = read_single(**{ **table, **measure })
measure_dfs.append(measure_df)
# concatenate different tables in this file, matching the wells
df = pd.concat(measure_dfs, join='inner', axis=1)
df = | pd.merge(left=table_platemap, right=df, left_index=True, right_index=True) | pandas.merge |
#!/usr/bin/env python
"""
Represent connectivity pattern using pandas DataFrame.
"""
from collections import OrderedDict
import itertools
import re
from future.utils import iteritems
from past.builtins import basestring
import networkx as nx
import numpy as np
import pandas as pd
from .plsel import Selector, SelectorMethods
from .pm import BasePortMapper
class Interface(object):
"""
Container for set of interface comprising ports.
This class contains information about a set of interfaces comprising
path-like identifiers and the attributes associated with them.
By default, each port must have at least the following attributes;
other attributes may be added:
- interface - indicates which interface a port is associated with.
- io - indicates whether the port receives input ('in') or
emits output ('out').
- type - indicates whether the port emits/receives spikes or
graded potentials.
All port identifiers in an interface must be unique. For two interfaces
to be deemed compatible, they must contain the same port identifiers and
their identifiers' 'io' attributes must be the inverse of each other
(i.e., every 'in' port in one interface must be mirrored by an 'out' port
in the other interface).
Examples
--------
>>> i = Interface('/foo[0:4],/bar[0:3]')
>>> i['/foo[0:2]', 'interface', 'io', 'type'] = [0, 'in', 'spike']
>>> i['/foo[2:4]', 'interface', 'io', 'type'] = [1, 'out', 'spike']
Attributes
----------
data : pandas.DataFrame
Port attribute data.
index : pandas.MultiIndex
Index of port identifiers.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., 'foo[0:2]') or sequence of token
sequences (e.g., [['foo', (0, 2)]]) describing the port
identifiers comprised by the interface.
columns : list, default = ['interface', 'io', 'type']
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, selector='', columns=['interface', 'io', 'type']):
# All ports in an interface must contain at least the following
# attributes:
assert set(columns).issuperset(['interface', 'io', 'type'])
self.sel = SelectorMethods()
assert not(self.sel.is_ambiguous(selector))
self.num_levels = self.sel.max_levels(selector)
names = [i for i in range(self.num_levels)]
idx = self.sel.make_index(selector, names)
self.__validate_index__(idx)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
# Dictionary containing mappers for different port types:
self.pm = {}
def __validate_index__(self, idx):
"""
Raise an exception if the specified index will result in an invalid interface.
"""
if idx.duplicated().any():
raise ValueError('Duplicate interface index entries detected.')
def __getitem__(self, key):
if type(key) == tuple and len(key) > 1:
return self.sel.select(self.data[list(key[1:])], key[0])
else:
return self.sel.select(self.data, key)
def __setitem__ambiguous__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Ensure that the specified selector can actually be used against the
# Interface's internal DataFrame:
try:
idx = self.sel.get_index(self.data, selector,
names=self.data.index.names)
except ValueError:
raise ValueError('cannot create index with '
'selector %s and column names %s' \
% (selector, str(self.data.index.names)))
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
for k, v in iteritems(data):
self.data[k].loc[idx] = v
def __setitem__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Fall back to slower method if the selector is ambiguous:
if self.sel.is_ambiguous(selector):
self.__setitem__ambiguous__(key, value)
return
else:
selector = Selector(selector)
# Don't waste time trying to do anything if the selector is empty:
if not selector.nonempty:
return
# If the number of specified identifiers doesn't exceed the size of the
# data array, enlargement by specifying identifiers that are not in
# the index will not occur:
assert len(selector) <= len(self.data)
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
if selector.max_levels == 1:
s = [i for i in itertools.chain(*selector.expanded)]
else:
s = self.sel.pad_selector(selector.expanded,
len(self.index.levshape))
for k, v in iteritems(data):
self.data[k].loc[s] = v
@property
def index(self):
"""
Interface index.
"""
return self.data.index
@index.setter
def index(self, i):
self.data.index = i
@property
def interface_ids(self):
"""
Interface identifiers.
"""
return set(self.data['interface'])
@property
def io_inv(self):
"""
Returns new Interface instance with inverse input-output attributes.
Returns
-------
i : Interface
Interface instance whose 'io' attributes are the inverse of those of
the current instance.
"""
data_inv = self.data.copy()
f = lambda x: 'out' if x == 'in' else \
('in' if x == 'out' else x)
data_inv['io'] = data_inv['io'].apply(f)
return self.from_df(data_inv)
@property
def idx_levels(self):
"""
Number of levels in Interface index.
"""
if isinstance(self.data.index, pd.MultiIndex):
return len(self.index.levels)
else:
return 1
def clear(self):
"""
Clear all ports in class instance.
"""
self.data.drop(self.data.index, inplace=True)
def data_select(self, f, inplace=False):
"""
Restrict Interface data with a selection function.
Returns an Interface instance containing only those rows
whose data is passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single dict argument whose keys
are the Interface's data column names.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing data selected by `f`.
"""
assert callable(f)
result = self.data[f(self.data)]
if inplace:
self.data = result
return self
else:
return Interface.from_df(result)
@classmethod
def from_df(cls, df):
"""
Create an Interface from a properly formatted DataFrame.
Examples
--------
>>> import plsel, pattern
>>> import pandas
>>> idx = plsel.SelectorMethods.make_index('/foo[0:2]')
>>> data = [[0, 'in', 'spike'], [1, 'out', 'gpot']]
>>> columns = ['interface', 'io', 'type']
>>> df = pandas.DataFrame(data, index=idx, columns=columns)
>>> i = pattern.Interface.from_df(df)
Parameters
----------
df : pandas.DataFrame
DataFrame with a MultiIndex and data columns 'interface',
'io', and 'type' (additional columns may also be present).
Returns
-------
i : Interface
Generated Interface instance.
Notes
-----
The contents of the specified DataFrame instance are copied into the
new Interface instance.
"""
assert set(df.columns).issuperset(['interface', 'io', 'type'])
if isinstance(df.index, pd.MultiIndex):
if len(df.index):
i = cls(df.index.tolist(), df.columns)
else:
i = cls([()], df.columns)
elif isinstance(df.index, pd.Index):
if len(df.index):
i = cls([(s,) for s in df.index.tolist()], df.columns)
else:
i = cls([()], df.columns)
else:
raise ValueError('invalid index type')
i.data = df.copy()
i.__validate_index__(i.index)
return i
@classmethod
def from_csv(cls, file_name, **kwargs):
"""
Create an Interface from a properly formatted CSV file.
Parameters
----------
file_name : str
File name of CSV file containing interface data.
kwargs : dict
Options to pass to `DataFrame.from_csv()`
Returns
-------
i : Interface
Generated Interface instance.
"""
df = pd.DataFrame.from_csv(file_name, **kwargs)
return cls.from_df(df)
@classmethod
def from_dict(cls, d):
"""
Create an Interface from a dictionary of selectors and data values.
Examples
--------
>>> d = {'/foo[0]': [0, 'in', 'gpot'], '/foo[1]': [1, 'in', 'gpot']}
>>> i = Interface.from_dict(d)
Parameters
----------
d : dict
Dictionary that maps selectors to the data that should be associated
with the corresponding ports. If a scalar, the data is assigned to
the first attribute; if an iterable, the data is assigned to the
attributes in order.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(','.join(d.keys()))
for k, v in iteritems(d):
i[k] = v
i.data.sort_index(inplace=True)
return i
@classmethod
def from_graph(cls, g):
"""
Create an Interface from a NetworkX graph.
Examples
--------
>>> import networkx as nx
>>> g = nx.Graph()
>>> g.add_node('/foo[0]', interface=0, io='in', type='gpot')
>>> g.add_node('/foo[1]', interface=0, io='in', type='gpot')
>>> i = Interface.from_graph(g)
Parameters
----------
g : networkx.Graph
Graph whose node IDs are path-like port identifiers. The node attributes
are assigned to the ports.
Returns
-------
i : Interface
Generated interface instance.
"""
assert isinstance(g, nx.Graph)
return cls.from_dict(g.node)
@classmethod
def from_selectors(cls, sel, sel_in='', sel_out='',
sel_spike='', sel_gpot='', *sel_int_list):
"""
Create an Interface instance from selectors.
Parameters
----------
sel : str, unicode, or sequence
Selector describing all ports comprised by interface.
sel_in : str, unicode, or sequence
Selector describing the interface's input ports.
sel_out : str, unicode, or sequence
Selector describing the interface's output ports.
sel_spike : str, unicode, or sequence
Selector describing the interface's spiking ports.
sel_gpot : str, unicode, or sequence
Selector describing the interface's graded potential ports.
sel_int_list : list of str, unicode, or sequence
Selectors consecutively describing the ports associated with interface 0,
interface 1, etc.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(sel)
i[sel_in, 'io'] = 'in'
i[sel_out, 'io'] = 'out'
i[sel_spike, 'type'] = 'spike'
i[sel_gpot, 'type'] = 'gpot'
for n, sel_int in enumerate(sel_int_list):
i[sel_int, 'interface'] = n
return i
def gpot_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to graded potential ports.
Parameters
----------
i : int
Interface identifier. If None, return all graded potential ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all graded potential ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'gpot']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'gpot') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def in_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to input ports.
Parameters
----------
i : int
Interface identifier. If None, return all input ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all input ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'in']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'in') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def interface_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to specific interface.
Parameters
----------
i : int
Interface identifier. If None, return all ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface
Either an Interface instance containing all ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
if tuples:
return self.index.tolist()
else:
return self.copy()
else:
try:
df = self.data[self.data['interface'] == i]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def _merge_on_interfaces(self, a, i, b):
"""
Merge contents of this and another Interface instance.
Notes
-----
If the number of levels in one Interface instance's DataFrame index is
greater than that of the other, the number of levels in the index of the
merged DataFrames instances is set to the former and the index with the
smaller number is padded with blank entries to enable Panda's merge
mechanism to function properly.
"""
assert isinstance(i, Interface)
df_left = self.data[self.data['interface'] == a]
df_right = i.data[i.data['interface'] == b]
n_left_names = len(self.data.index.names)
n_right_names = len(i.data.index.names)
# Pandas' merge mechanism fails if the number of levels in each of the
# merged MultiIndex indices differs and there is overlap of more than
# one level; we therefore pad the index with the smaller number of
# levels before attempting the merge:
if n_left_names > n_right_names:
for n in range(i.num_levels, i.num_levels+(n_left_names-n_right_names)):
new_col = str(n)
df_right[new_col] = ''
df_right.set_index(new_col, append=True, inplace=True)
elif n_left_names < n_right_names:
for n in range(self.num_levels, self.num_levels+(n_right_names-n_left_names)):
new_col = str(n)
df_left[new_col] = ''
df_left.set_index(new_col, append=True, inplace=True)
return pd.merge(df_left, df_right,
left_index=True,
right_index=True)
def get_common_ports(self, a, i, b, t=None):
"""
Get port identifiers common to this and another Interface instance.
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
t : str or unicode
If not None, restrict output to those identifiers with the specified
port type.
Returns
-------
result : list of tuple
Expanded port identifiers shared by the two specified Interface
instances.
Notes
-----
The number of levels of the returned port identifiers is equal to the
maximum number of levels of this Interface instance.
The order of the returned port identifiers is not guaranteed.
"""
if t is None:
x = self.data[self.data['interface'] == a]
y = i.data[i.data['interface'] == b]
else:
x = self.data[(self.data['interface'] == a) & (self.data['type'] == t)]
y = i.data[(i.data['interface'] == b) & (i.data['type'] == t)]
if isinstance(x.index, pd.MultiIndex):
x_list = [tuple(a for a in b if a != '') \
for b in x.index]
else:
x_list = [(a,) for a in x.index]
if isinstance(y.index, pd.MultiIndex):
y_list = [tuple(a for a in b if a != '') \
for b in y.index]
else:
y_list = [(a,) for a in y.index]
return list(set(x_list).intersection(y_list))
def is_compatible(self, a, i, b, allow_subsets=False):
"""
Check whether two interfaces can be connected.
Compares an interface in the current Interface instance with one in
another instance to determine whether their ports can be connected.
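Examples
--------
A minimal illustration (port names and attribute values chosen arbitrarily):
>>> i1 = Interface('/a[0:2]')
>>> i1['/a[0]', 'interface', 'io'] = [0, 'out']
>>> i1['/a[1]', 'interface', 'io'] = [0, 'in']
>>> i2 = Interface('/a[0:2]')
>>> i2['/a[0]', 'interface', 'io'] = [1, 'in']
>>> i2['/a[1]', 'interface', 'io'] = [1, 'out']
>>> i1.is_compatible(0, i2, 1)
True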
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
allow_subsets : bool
If True, interfaces that contain a compatible subset of ports are
deemed to be compatible; otherwise, all ports in the two interfaces
must be compatible.
Returns
-------
result : bool
True if both interfaces comprise the same identifiers, the set 'type'
attributes for each matching pair of identifiers in the two
interfaces match, and each identifier with an 'io' attribute set
to 'out' in one interface has its 'io' attribute set to 'in' in the
other interface.
Notes
-----
Assumes that the port identifiers in both interfaces are sorted in the
same order.
"""
# Merge the interface data on their indices (i.e., their port identifiers):
data_merged = self._merge_on_interfaces(a, i, b)
# Check whether there are compatible subsets, i.e., at least one pair of
# ports from the two interfaces that are compatible with each other:
if allow_subsets:
# If the interfaces share no identical port identifiers, they are
# incompatible:
if not len(data_merged):
return False
# Compatible identifiers must have the same non-null 'type'
# attribute and their non-null 'io' attributes must be the inverse
# of each other:
if not data_merged.apply(lambda row: \
((row['type_x'] == row['type_y']) or \
(pd.isnull(row['type_x']) and pd.isnull(row['type_y']))) and \
((row['io_x'] == 'out' and row['io_y'] == 'in') or \
(row['io_x'] == 'in' and row['io_y'] == 'out') or \
(pd.isnull(row['io_x']) and pd.isnull(row['io_y']))),
axis=1).any():
return False
# Require that all ports in the two interfaces be compatible:
else:
# If one interface contains identifiers not in the other, they are
# incompatible:
if len(data_merged) < max(len(self.data[self.data['interface'] == a]),
len(i.data[i.data['interface'] == b])):
return False
# Compatible identifiers must have the same non-null 'type'
# attribute and their non-null 'io' attributes must be the inverse
# of each other:
if not data_merged.apply(lambda row: \
((row['type_x'] == row['type_y']) or \
(pd.isnull(row['type_x']) and pd.isnull(row['type_y']))) and \
((row['io_x'] == 'out' and row['io_y'] == 'in') or \
(row['io_x'] == 'in' and row['io_y'] == 'out') or \
(pd.isnull(row['io_x']) and pd.isnull(row['io_y']))),
axis=1).all():
return False
# All tests passed:
return True
def is_in_interfaces(self, s):
"""
Check whether ports comprised by a selector are in the stored interfaces.
Parameters
----------
s : str or unicode
Port selector.
Returns
-------
result : bool
True if the comprised ports are in any of the stored interfaces.
"""
try:
# Pad the expanded selector with blanks to prevent pandas from
# spurious matches such as mistakenly validating '/foo' as being in
# an Interface that only contains the ports '/foo[0:2]':
idx = self.sel.expand(s, self.idx_levels)
if not isinstance(self.data.index, pd.MultiIndex):
idx = [x[0] for x in idx]
d = self.data['interface'].loc[idx]
if isinstance(d, int):
return True
if np.any(d.isnull().tolist()):
return False
else:
return True
except:
return self.sel.is_in(s, self.index.tolist())
def out_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to output ports.
Parameters
----------
i : int
Interface identifier. If None, return all output ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all output ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'out']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'out') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def port_select(self, f, inplace=False):
"""
Restrict Interface ports with a selection function.
Returns an Interface instance containing only those rows
whose ports are passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single tuple argument containing
the various columns of the Interface instance's MultiIndex.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing ports selected by `f`.
"""
assert callable(f)
if inplace:
self.data = self.data.select(f)
return self
else:
return Interface.from_df(self.data.select(f))
def spike_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to spiking ports.
Parameters
----------
i : int
Interface identifier. If None, return all spiking ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all spiking ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'spike']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'spike') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def to_selectors(self, i=None):
"""
Retrieve Interface's port identifiers as list of path-like selectors.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
selectors : list of str
List of selector strings corresponding to each port identifier.
"""
ids = self.to_tuples(i)
result = []
for t in ids:
selector = ''
for s in t:
if isinstance(s, basestring):
selector += '/'+s
else:
selector += '[%s]' % s
result.append(selector)
return result
def to_tuples(self, i=None):
"""
Retrieve Interface's port identifiers as list of tuples.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
result : list of tuple
List of token tuples corresponding to each port identifier.
"""
if i is None:
if isinstance(self.index, pd.MultiIndex):
return self.index.tolist()
else:
return [(t,) for t in self.index]
try:
if isinstance(self.index, pd.MultiIndex):
return self.data[self.data['interface'] == i].index.tolist()
else:
return [(t,) for t in self.data[self.data['interface'] == i].index]
except:
return []
def which_int(self, s):
"""
Return the interface containing the identifiers comprised by a selector.
Parameters
----------
selector : str or unicode
Port selector.
Returns
-------
i : set
Set of identifiers for interfaces that contain ports comprised by
the selector.
"""
try:
idx = self.sel.expand(s, self.idx_levels)
if not isinstance(self.data.index, pd.MultiIndex):
idx = [x[0] for x in idx]
d = self.data['interface'].loc[idx]
s = set(d)
s.discard(np.nan)
return s
except:
try:
s = set(self[s, 'interface'].values.flatten())
# Ignore unset entries:
s.discard(np.nan)
return s
except KeyError:
return set()
def __copy__(self):
"""
Make a copy of this object.
"""
return self.from_df(self.data)
copy = __copy__
copy.__doc__ = __copy__.__doc__
def set_pm(self, t, pm):
"""
Set port mapper associated with a specific port type.
Parameters
----------
t : str or unicode
Port type.
pm : neurokernel.plsel.BasePortMapper
Port mapper to save.
"""
# Ensure that the ports in the specified port mapper are a subset of
# those in the interface associated with the specified type:
assert isinstance(pm, BasePortMapper)
if not self.sel.is_in(pm.index.tolist(),
self.pm[t].index.tolist()):
raise ValueError('cannot set mapper using undefined selectors')
self.pm[t] = pm.copy()
def equals(self, other):
"""
Check whether this interface is equivalent to another interface.
Parameters
----------
other : neurokernel.pattern.Interface
Interface instance to compare to this Interface.
Returns
-------
result : bool
True if the interfaces are identical.
Notes
-----
Interfaces containing the same rows in different orders are not
regarded as equivalent.
"""
assert isinstance(other, Interface)
return self.data.equals(other.data)
def __len__(self):
return self.data.__len__()
def __repr__(self):
return 'Interface\n---------\n'+self.data.__repr__()
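# Editor's note: the doctest-style sketch below is an illustration added for
# clarity and is not part of the original module; the port identifiers are
# hypothetical.
#
# >>> i = Interface('/foo[0:3]')
# >>> i['/foo[0:2]', 'interface', 'io', 'type'] = [0, 'in', 'gpot']
# >>> i['/foo[2]', 'interface', 'io', 'type'] = [0, 'out', 'spike']
# >>> i.out_ports(0, tuples=True)
# [('foo', 2)]
# >>> i.to_selectors(0)
# ['/foo[0]', '/foo[1]', '/foo[2]']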
class Pattern(object):
"""
Connectivity pattern linking sets of interface ports.
This class represents connection mappings between interfaces comprising
sets of ports. Ports are represented using path-like identifiers;
the presence of a row linking the two identifiers in the class' internal
index indicates the presence of a connection. A single data attribute
('conn') associated with defined connections is created by default.
Specific attributes may be accessed by specifying their names after the
port identifiers; if a nonexistent attribute is specified when a sequential
value is assigned, a new column for that attribute is automatically
created: ::
p['/x[0]', '/y[0]', 'conn', 'x'] = [1, 'foo']
The direction of connections between ports in a class instance determines
    whether they are input or output ports. Ports may not both receive input and
    emit output. Patterns may contain fan-out connections, i.e., one source port
connected to multiple destination ports, but not fan-in connections, i.e.,
multiple source ports connected to a single destination port.
Examples
--------
>>> p = Pattern('/x[0:3]','/y[0:4]')
>>> p['/x[0]', '/y[0:2]'] = 1
>>> p['/y[2]', '/x[1]'] = 1
>>> p['/y[3]', '/x[2]'] = 1
Attributes
----------
data : pandas.DataFrame
Connection attribute data.
index : pandas.MultiIndex
Index of connections.
interface : Interface
Interfaces containing port identifiers and attributes.
Parameters
----------
sel0, sel1, ...: str, unicode, or sequence
Selectors defining the sets of ports potentially connected by the
pattern. These selectors must be disjoint, i.e., no identifier
comprised by one selector may be in any other selector.
columns : sequence of str
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, *selectors, **kwargs):
columns = kwargs.get('columns', ['conn'])
self.sel = SelectorMethods()
# Force sets of identifiers to be disjoint so that no identifier can
# denote a port in more than one set:
assert self.sel.are_disjoint(*selectors)
# Collect all of the selectors:
selector = []
for s in selectors:
if isinstance(s, Selector) and len(s) != 0:
selector.extend(s.expanded)
elif isinstance(s, basestring):
selector.extend(self.sel.parse(s))
elif np.iterable(s):
selector.extend(s)
else:
raise ValueError('invalid selector type')
# Create Interface instance containing the ports comprised by all of the
# specified selectors:
self.interface = Interface(selector)
# Set the interface identifiers associated with each of the selectors
# consecutively:
for i, s in enumerate(selectors):
self.interface[s, 'interface'] = i
# Create a MultiIndex that can store mappings between identifiers in the
# two interfaces:
self.num_levels = {'from': self.interface.num_levels,
'to': self.interface.num_levels}
names = ['from_%s' % i for i in range(self.num_levels['from'])]+ \
['to_%s' %i for i in range(self.num_levels['to'])]
levels = [[] for i in range(len(names))]
labels = [[] for i in range(len(names))]
idx = pd.MultiIndex(levels=levels, codes=labels, names=names)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
@property
def from_slice(self):
"""
Slice of pattern index row corresponding to source port(s).
"""
return slice(0, self.num_levels['from'])
@property
def to_slice(self):
"""
Slice of pattern index row corresponding to destination port(s).
"""
return slice(self.num_levels['from'],
self.num_levels['from']+self.num_levels['to'])
@property
def index(self):
"""
Pattern index.
"""
return self.data.index
@index.setter
def index(self, i):
self.data.index = i
@property
def interface_ids(self):
"""
Interface identifiers.
"""
return self.interface.interface_ids
@classmethod
def _create_from(cls, *selectors, **kwargs):
"""
Create a Pattern instance from the specified selectors.
Parameters
----------
sel0, sel1, ...: str
Selectors defining the sets of ports potentially connected by the
pattern. These selectors must be disjoint, i.e., no identifier comprised
by one selector may be in any other selector, and non-empty.
from_sel, to_sel : str
Selectors that describe the pattern's initial index. If specified,
both selectors must be set. If no selectors are set, the index is
initially empty.
gpot_sel, spike_sel : str
Selectors that describe the graded potential and spiking ports in a
pattern's initial index.
data : numpy.ndarray, dict, or pandas.DataFrame
            Data to store in the class instance.
columns : sequence of str
Data column names.
        comb_op : str
Operator to use to combine selectors into single selector that
comprises both the source and destination ports in a pattern.
validate : bool
If True, validate the index of the Pattern's DataFrame.
Returns
-------
result : Pattern
Pattern instance.
"""
from_sel = kwargs.get('from_sel', None)
to_sel = kwargs.get('to_sel', None)
gpot_sel = kwargs.get('gpot_sel', None)
spike_sel = kwargs.get('spike_sel', None)
data = kwargs.get('data', None)
columns = kwargs.get('columns', ['conn'])
comb_op = kwargs.get('comb_op', '+')
validate = kwargs.get('validate', True)
# Create empty pattern:
for s in selectors:
if not len(s):
raise ValueError('cannot create pattern with empty selector %s' % s)
p = cls(*selectors, columns=columns)
# Construct index from concatenated selectors if specified:
names = p.data.index.names
if (from_sel is None and to_sel is None):
levels = [[] for i in range(len(names))]
labels = [[] for i in range(len(names))]
idx = pd.MultiIndex(levels=levels, codes=labels, names=names)
elif isinstance(from_sel, Selector) and isinstance(to_sel, Selector):
if comb_op == '.+':
idx = p.sel.make_index(Selector.concat(from_sel, to_sel), names)
elif comb_op == '+':
idx = p.sel.make_index(Selector.prod(from_sel, to_sel), names)
else:
raise ValueError('incompatible selectors specified')
else:
idx = p.sel.make_index('(%s)%s(%s)' % (from_sel, comb_op, to_sel), names)
if validate:
p.__validate_index__(idx)
# Replace the pattern's DataFrame:
p.data = pd.DataFrame(data=data, index=idx, columns=columns, dtype=object)
# Update the `io` attributes of the pattern's interfaces:
p.interface[from_sel, 'io'] = 'in'
p.interface[to_sel, 'io'] = 'out'
# Update the `type` attributes of the pattern's interface:
if gpot_sel is not None:
p.interface[gpot_sel, 'type'] = 'gpot'
if spike_sel is not None:
p.interface[spike_sel, 'type'] = 'spike'
return p
def clear(self):
"""
Clear all connections in class instance.
"""
self.interface.clear()
self.data.drop(self.data.index, inplace=True)
@classmethod
def from_df(cls, df_int, df_pat):
"""
Create a Pattern from properly formatted DataFrames.
Parameters
----------
df_int : pandas.DataFrame
DataFrame with a MultiIndex and data columns 'interface',
'io', and 'type' (additional columns may also be present) that
describes the pattern's interfaces. The index's rows must correspond
to individual port identifiers.
df_pat : pandas.DataFrame
DataFrame with a MultiIndex and a data column 'conn' (additional
columns may also be present) that describes the connections between
ports in the pattern's interfaces. The index's level names must be
'from_0'..'from_N', 'to_0'..'to_M', where N and M are the maximum
number of levels in the pattern's two interfaces.
"""
# Create pattern with phony selectors:
pat = cls('/foo[0]', '/bar[0]')
# Check that the 'interface' column of the interface DataFrame is set:
if any(df_int['interface'].isnull()):
raise ValueError('interface column must be set')
# Create interface:
pat.interface = Interface.from_df(df_int)
# The pattern DataFrame's index must contain at least two levels:
assert isinstance(df_pat.index, pd.MultiIndex)
# Check that pattern DataFrame index levels are named correctly,
# i.e., from_0..from_N and to_0..to_N, where N is equal to
# pat.interface.num_levels:
num_levels = pat.interface.num_levels
if df_pat.index.names != ['from_%i' % i for i in range(num_levels)]+\
['to_%i' % i for i in range(num_levels)]:
raise ValueError('incorrectly named pattern index levels')
for t in df_pat.index.tolist():
from_t = t[0:num_levels]
to_t = t[num_levels:2*num_levels]
if from_t not in df_int.index or to_t not in df_int.index:
raise ValueError('pattern DataFrame contains identifiers '
'not in interface DataFrame')
pat.data = df_pat.copy()
return pat
@classmethod
def from_product(cls, *selectors, **kwargs):
"""
Create pattern from the product of identifiers comprised by two selectors.
For example: ::
p = Pattern.from_product('/foo[0:2]', '/bar[0:2]',
from_sel='/foo[0:2]', to_sel='/bar[0:2]',
data=1)
results in a pattern with the following connections: ::
'/foo[0]' -> '/bar[0]'
'/foo[0]' -> '/bar[1]'
'/foo[1]' -> '/bar[0]'
'/foo[1]' -> '/bar[1]'
Parameters
----------
sel0, sel1, ...: str
Selectors defining the sets of ports potentially connected by the
pattern. These selectors must be disjoint, i.e., no identifier comprised
by one selector may be in any other selector.
from_sel, to_sel : str
Selectors that describe the pattern's initial index. If specified,
both selectors must be set; the 'io' attribute of the ports
comprised by these selectors is respectively set to 'out' and
'in'. If no selectors are set, the index is initially empty.
gpot_sel, spike_sel : str
Selectors that describe the graded potential and spiking ports in a
pattern's initial index. If specified, the 'type' attribute of the
ports comprised by these selectors is respectively set to 'gpot'
and 'spike'.
data : numpy.ndarray, dict, or pandas.DataFrame
            Data to store in the class instance.
columns : sequence of str
Data column names.
validate : bool
If True, validate the index of the Pattern's DataFrame.
Returns
-------
result : Pattern
Pattern instance.
"""
from_sel = kwargs.get('from_sel', None)
to_sel = kwargs.get('to_sel', None)
gpot_sel = kwargs.get('gpot_sel', None)
spike_sel = kwargs.get('spike_sel', None)
data = kwargs.get('data', None)
columns = kwargs.get('columns', ['conn'])
validate = kwargs.get('validate', True)
return cls._create_from(*selectors, from_sel=from_sel, to_sel=to_sel,
gpot_sel=gpot_sel, spike_sel=spike_sel,
data=data, columns=columns, comb_op='+', validate=validate)
def gpot_ports(self, i=None, tuples=False):
return self.interface.gpot_ports(i, tuples)
gpot_ports.__doc__ = Interface.gpot_ports.__doc__
def in_ports(self, i=None, tuples=False):
return self.interface.in_ports(i, tuples)
in_ports.__doc__ = Interface.in_ports.__doc__
def interface_ports(self, i=None, tuples=False):
return self.interface.interface_ports(i, tuples)
interface_ports.__doc__ = Interface.interface_ports.__doc__
def out_ports(self, i=None, tuples=False):
return self.interface.out_ports(i, tuples)
out_ports.__doc__ = Interface.out_ports.__doc__
def spike_ports(self, i=None, tuples=False):
return self.interface.spike_ports(i, tuples)
spike_ports.__doc__ = Interface.spike_ports.__doc__
def connected_ports(self, i=None, tuples=False):
"""
Return ports that are connected by the pattern.
Parameters
----------
i : int
Interface identifier.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface
Either an Interface instance containing all connected ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
Notes
-----
Returned ports are listed in lexicographic order.
"""
        # Use sets to accumulate the expanded ports to avoid passing duplicates
        # to DataFrame.loc.__getitem__():
ports = set()
for t in self.data.index:
ports.add(t[0:self.num_levels['from']])
ports.add(t[self.num_levels['from']:self.num_levels['from']+self.num_levels['to']])
# Sort the expanded ports so that the results are returned in
# lexicographic order:
df = self.interface.data.loc[sorted(ports)]
if i is None:
if tuples:
return df.index.tolist()
else:
return Interface.from_df(df)
else:
if tuples:
return df[df['interface'] == i].index.tolist()
else:
return Interface.from_df(df[df['interface'] == i])
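    # Editor's illustration (not in the original source): for the Pattern in the
    # class docstring (p = Pattern('/x[0:3]', '/y[0:4]') with /x[0]->/y[0:2],
    # /y[2]->/x[1] and /y[3]->/x[2]), p.connected_ports(tuples=True) returns the
    # lexicographically sorted list
    # [('x', 0), ('x', 1), ('x', 2), ('y', 0), ('y', 1), ('y', 2), ('y', 3)].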
@classmethod
def from_concat(cls, *selectors, **kwargs):
"""
        Create pattern from the concatenation of identifiers comprised by two selectors.
For example: ::
p = Pattern.from_concat('/foo[0:2]', '/bar[0:2]',
from_sel='/foo[0:2]', to_sel='/bar[0:2]',
data=1)
results in a pattern with the following connections: ::
'/foo[0]' -> '/bar[0]'
'/foo[1]' -> '/bar[1]'
Parameters
----------
data : numpy.ndarray, dict, or pandas.DataFrame
            Data to store in the class instance.
from_sel, to_sel : str
Selectors that describe the pattern's initial index. If specified,
both selectors must be set; the 'io' attribute of the ports
comprised by these selectors is respectively set to 'out' and
'in'. If no selectors are set, the index is initially empty.
gpot_sel, spike_sel : str
Selectors that describe the graded potential and spiking ports in a
pattern's initial index. If specified, the 'type' attribute of the
ports comprised by these selectors is respectively set to 'gpot'
and 'spike'.
columns : sequence of str
Data column names.
validate : bool
If True, validate the index of the Pattern's DataFrame.
Returns
-------
result : Pattern
Pattern instance.
"""
from_sel = kwargs.get('from_sel', None)
to_sel = kwargs.get('to_sel', None)
gpot_sel = kwargs.get('gpot_sel', None)
spike_sel = kwargs.get('spike_sel', None)
data = kwargs.get('data', None)
columns = kwargs.get('columns', ['conn'])
validate = kwargs.get('validate', True)
return cls._create_from(*selectors, from_sel=from_sel, to_sel=to_sel,
gpot_sel=gpot_sel, spike_sel=spike_sel,
data=data, columns=columns, comb_op='.+', validate=validate)
def __validate_index__(self, idx):
"""
Raise an exception if the specified index will result in an invalid pattern.
"""
# Prohibit duplicate connections:
if idx.duplicated().any():
raise ValueError('Duplicate pattern entries detected.')
# Prohibit fan-in connections (i.e., patterns whose index has duplicate
# 'from' port identifiers):
from_idx, to_idx = self.split_multiindex(idx,
self.from_slice, self.to_slice)
if to_idx.duplicated().any():
raise ValueError('Fan-in pattern entries detected.')
# Prohibit ports that both receive input and send output:
if set(from_idx).intersection(to_idx):
raise ValueError('Ports cannot both receive input and send output.')
def which_int(self, s):
return self.interface.which_int(s)
which_int.__doc__ = Interface.which_int.__doc__
def is_in_interfaces(self, selector):
"""
Check whether a selector is supported by any stored interface.
"""
return self.interface.is_in_interfaces(selector)
def connected_port_pairs(self, as_str=False):
"""
Return connections as pairs of port identifiers.
Parameters
----------
as_str : bool
If True, return connections as a list of identifier
string pairs. Otherwise, return them as pairs of token tuples.
"""
if as_str:
return [(self.sel.tokens_to_str(row[self.from_slice]),
self.sel.tokens_to_str(row[self.to_slice])) \
for row in self.data.index]
else:
return [(row[self.from_slice], row[self.to_slice]) \
for row in self.data.index]
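    # Editor's illustration (not in the original source): for the Pattern in the
    # class docstring, connected_port_pairs(as_str=True) yields pairs such as
    # ('/x[0]', '/y[0]'), while the default form yields token tuples such as
    # (('x', 0), ('y', 0)).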
def __setitem__(self, key, value):
# Must pass more than one argument to the [] operators:
assert type(key) == tuple
# Ensure that specified selectors refer to ports in the
# pattern's interfaces:
assert self.is_in_interfaces(key[0])
assert self.is_in_interfaces(key[1])
# Ensure that the ports are in different interfaces:
assert self.which_int(key[0]) != self.which_int(key[1])
# Expand and pad the specified 'from' and 'to' selectors:
key_0_exp = self.sel.expand(key[0], self.num_levels['from'])
key_1_exp = self.sel.expand(key[1], self.num_levels['to'])
# Concatenate the selectors:
selector = tuple(tuple(j for j in itertools.chain(*i)) \
for i in itertools.product(key_0_exp, key_1_exp))
# Try using the selector to select data from the internal DataFrame:
try:
idx = self.sel.get_index(self.data, selector,
names=self.data.index.names)
# If the select fails, try to create new rows with the index specified
# by the selector and load them with the specified data:
except:
try:
idx = self.sel.make_index(selector, self.data.index.names)
except:
raise ValueError('cannot create new rows for ambiguous selector %s' % selector)
else:
found = False
else:
found = True
# Ensure that data to set is in dict form:
if len(key) > 2:
if np.isscalar(value):
data = {k:value for k in key[2:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[2:]):
data={k:v for k, v in zip(key[2:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
# If the specified selectors correspond to existing entries,
# set their attributes:
if found:
for k, v in iteritems(data):
self.data[k].loc[idx] = v
# Otherwise, populate a new DataFrame with the specified attributes:
else:
new_data = self.data.append(pd.DataFrame(data=data, index=idx,
dtype=object))
# Validate updated DataFrame's index before updating the instance's
# data attribute:
self.__validate_index__(new_data.index)
self.data = new_data
self.data.sort_index(inplace=True)
# Update the `io` attributes of the pattern's interfaces:
self.interface[key[0], 'io'] = 'in'
self.interface[key[1], 'io'] = 'out'
def __getitem__(self, key):
assert len(key) >= 2
sel_0 = self.sel.expand(key[0])
sel_1 = self.sel.expand(key[1])
selector = [f+t for f, t in itertools.product(sel_0, sel_1)]
if len(key) > 2:
return self.sel.select(self.data[list(key[2:])], selector=selector)
else:
return self.sel.select(self.data, selector=selector)
def src_idx(self, src_int, dest_int,
src_type=None, dest_type=None, dest_ports=None, duplicates=False):
"""
Retrieve source ports connected to the specified destination ports.
Examples
--------
>>> p = Pattern('/foo[0:4]', '/bar[0:4]')
>>> p['/foo[0]', '/bar[0]'] = 1
>>> p['/foo[1]', '/bar[1]'] = 1
>>> p['/foo[2]', '/bar[2]'] = 1
>>> p['/bar[3]', '/foo[3]'] = 1
>>> all(p.src_idx(0, 1, dest_ports='/bar[0,1]') == [('foo', 0), ('foo', 1)])
True
Parameters
----------
src_int, dest_int : int
Source and destination interface identifiers.
src_type, dest_type : str
Types of source and destination ports as listed in their respective
interfaces.
dest_ports : str
Path-like selector corresponding to ports in destination
interface. If not specified, all ports in the destination
interface are considered.
duplicates : bool
If True, include duplicate ports in output.
Returns
-------
idx : list of tuple
Source ports connected to the specified destination ports.
"""
assert src_int != dest_int
assert src_int in self.interface.interface_ids and \
dest_int in self.interface.interface_ids
# Filter destination ports by specified type:
if dest_type is None:
to_int = self.interface.interface_ports(dest_int)
else:
if dest_type == 'gpot':
to_int = self.interface.gpot_ports(dest_int)
elif dest_type == 'spike':
to_int = self.interface.spike_ports(dest_int)
else:
to_f = lambda x: x['type'] == dest_type
to_int = self.interface.interface_ports(dest_int).data_select(to_f)
# Filter destination ports by specified ports:
if dest_ports is None:
to_idx = set(to_int.index)
else:
to_idx = set(to_int[dest_ports].index)
# Filter source ports by specified type:
if src_type is None:
from_int = self.interface.interface_ports(src_int)
else:
if src_type == 'gpot':
from_int = self.interface.gpot_ports(src_int)
elif src_type == 'spike':
from_int = self.interface.spike_ports(src_int)
else:
from_f = lambda x: x['type'] == src_type
from_int = self.interface.interface_ports(src_int).data_select(from_f)
from_idx = set(from_int.index)
idx = []
for x in self.data.index:
tmp1 = x[self.from_slice]
if tmp1 in from_idx:
if x[self.to_slice] in to_idx:
idx.append(tmp1)
if not duplicates:
# Remove duplicate tuples from output without perturbing the order
# of the remaining tuples:
return list(OrderedDict.fromkeys(idx).keys())
else:
return idx
def dest_idx(self, src_int, dest_int,
src_type=None, dest_type=None, src_ports=None):
"""
Retrieve destination ports connected to the specified source ports.
Examples
--------
>>> p = Pattern('/foo[0:4]', '/bar[0:4]')
>>> p['/foo[0]', '/bar[0]'] = 1
>>> p['/foo[1]', '/bar[1]'] = 1
>>> p['/foo[2]', '/bar[2]'] = 1
>>> p['/bar[3]', '/foo[3]'] = 1
>>> all(p.dest_idx(0, 1, src_ports='/foo[0,1]') == [('bar', 0), ('bar', 1)])
True
Parameters
----------
src_int, dest_int : int
Source and destination interface identifiers.
src_type, dest_type : str
Types of source and destination ports as listed in their respective
interfaces.
src_ports : str
Path-like selector corresponding to ports in source
interface. If not specified, all ports in the source
interface are considered.
Returns
-------
idx : list of tuple
Destination ports connected to the specified source ports.
Notes
-----
No `duplicates` parameter is provided because fan-in from multiple
source ports to a single destination port is not permitted.
"""
assert src_int != dest_int
assert src_int in self.interface.interface_ids and \
dest_int in self.interface.interface_ids
# Filter source ports by specified type:
if src_type is None:
from_int = self.interface.interface_ports(src_int)
else:
if src_type == 'gpot':
from_int = self.interface.gpot_ports(src_int)
            elif src_type == 'spike':
from_int = self.interface.spike_ports(src_int)
else:
from_f = lambda x: x['type'] == src_type
from_int = self.interface.interface_ports(src_int).data_select(from_f)
# Filter source ports by specified ports:
if src_ports is None:
from_idx = set(from_int.index)
else:
from_idx = set(from_int[src_ports].index)
# Filter destination ports by specified type:
if dest_type is None:
to_int = self.interface.interface_ports(dest_int)
else:
if dest_type == 'gpot':
to_int = self.interface.gpot_ports(dest_int)
elif dest_type == 'spike':
to_int = self.interface.spike_ports(dest_int)
else:
to_f = lambda x: x['type'] == dest_type
to_int = self.interface.interface_ports(dest_int).data_select(to_f)
to_idx = set(to_int.index)
idx = []
for x in self.data.index:
tmp1 = x[self.to_slice]
if tmp1 in to_idx:
if x[self.from_slice] in from_idx:
idx.append(tmp1)
# Remove duplicate tuples from output without perturbing the order
# of the remaining tuples:
return list(OrderedDict.fromkeys(idx).keys())
def __len__(self):
return self.data.__len__()
def __repr__(self):
return 'Pattern\n-------\n'+self.data.__repr__()
def is_connected(self, from_int, to_int):
"""
Check whether the specified interfaces are connected.
Parameters
----------
from_int, to_int : int
Interface identifiers; must be in `self.interface.keys()`.
Returns
-------
result : bool
True if at least one connection from a port identifier in interface
`from_int` to a port identifier in interface `to_int` exists.
"""
assert from_int != to_int
assert from_int in self.interface.interface_ids
assert to_int in self.interface.interface_ids
# Get indices of the 'from' and 'to' interfaces as lists to speed up the
# check below [*]:
from_idx = set(self.interface.data[self.interface.data['interface'] == from_int].index.tolist())
to_idx = set(self.interface.data[self.interface.data['interface'] == to_int].index.tolist())
# Get index of all defined connections:
idx = self.data[self.data['conn'] != 0].index
for t in idx:
# Split tuple into 'from' and 'to' identifiers; since the interface
# index for a 'from' or 'to' identifier is an Index rather than a
# MultiIndex, we need to extract a scalar rather than a tuple in the
# former case:
if self.num_levels['from'] == 1:
from_id = t[0]
else:
from_id = t[0:self.num_levels['from']]
if self.num_levels['to'] == 1:
to_id = t[self.num_levels['from']]
else:
to_id = t[self.num_levels['from']:self.num_levels['from']+self.num_levels['to']]
# Check whether port identifiers are in the interface indices [*]:
if from_id in from_idx and to_id in to_idx:
return True
return False
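    # Editor's illustration (not in the original source): for the Pattern in the
    # class docstring, both p.is_connected(0, 1) and p.is_connected(1, 0) are True,
    # since connections run from /x[0] to /y[0:2] and from /y[2], /y[3] back to
    # interface 0.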
def from_csv(self, file_name, **kwargs):
"""
Read connectivity data from CSV file.
Given N 'from' levels and M 'to' levels in the internal index,
the method assumes that the first N+M columns in the file specify
the index levels.
See Also
--------
pandas.read_csv
"""
# XXX this should refuse to load identifiers that are not in any of the
# sets of ports comprised by the pattern:
data_names = self.data.columns
index_names = self.data.index.names
kwargs['names'] = data_names
kwargs['index_col'] = range(len(index_names))
data = pd.read_csv(file_name, **kwargs)
self.data = data
# Restore MultiIndex level names:
self.data.index.names = index_names
@classmethod
def from_graph(cls, g, return_key_order = False):
"""Convert a NetworkX directed graph into a Pattern instance.
Parameters
----------
g : networkx.DiGraph
Graph to convert. The node identifiers must be port identifiers.
        return_key_order : bool
            If True, also return the list of interface keys whose order was used to
            number the pattern's interfaces.
Returns
-------
p : Pattern
Pattern instance.
        key : list
            List of interface keys; their order determines the numbering of the
            pattern's interfaces. Only returned if `return_key_order` is True.
Notes
-----
The nodes in the specified graph must contain an 'interface' attribute.
Port attributes other than 'interface', 'io', and 'type' are not stored
in the created Pattern instance's interface.
"""
assert type(g) == nx.DiGraph
# Group port identifiers by interface number and whether the ports are
# graded potential, or spiking:
ports_by_int = {}
ports_gpot = []
ports_spike = []
ports_from = []
ports_to = []
for n, data in g.nodes(data=True):
assert SelectorMethods.is_identifier(n)
assert 'interface' in data
if not data['interface'] in ports_by_int:
ports_by_int[data['interface']] = []
ports_by_int[data['interface']].append(n)
if 'type' in data:
if data['type'] == 'gpot':
ports_gpot.append(n)
elif data['type'] == 'spike':
ports_spike.append(n)
# Use connection direction to determine whether ports are source or
# destination (XXX should this check whether the io attributes are
# consistent with the connection directions?):
for f, t in g.edges():
ports_from.append(f)
ports_to.append(t)
# Create selectors for each interface number:
selector_list = []
key_order = sorted(ports_by_int.keys())
for interface in key_order:
selector_list.append(','.join(ports_by_int[interface]))
p = cls.from_concat(*selector_list,
from_sel=','.join(ports_from),
to_sel=','.join(ports_to),
gpot_sel=','.join(ports_gpot),
spike_sel=','.join(ports_spike),
data=1)
p.data.sort_index(inplace=True)
p.interface.data.sort_index(inplace=True)
if return_key_order:
return p, key_order
else:
return p
@classmethod
def split_multiindex(cls, idx, a, b):
"""
Split a single MultiIndex into two instances.
Parameters
----------
idx : pandas.MultiIndex
MultiIndex to split.
a, b : slice
Ranges of index columns to include in the two resulting instances.
Returns
-------
idx_a, idx_b : pandas.MultiIndex
Resulting MultiIndex instances.
"""
t_list = idx.tolist()
idx_a = pd.MultiIndex.from_tuples([t[a] for t in t_list])
        idx_b = pd.MultiIndex.from_tuples([t[b] for t in t_list])
        return idx_a, idx_b
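        # Editor's illustration (not in the original source): for an index whose
        # tuples look like ('foo', 0, 'bar', 1), split_multiindex(idx, slice(0, 2),
        # slice(2, 4)) returns one MultiIndex of ('foo', 0)-style tuples and one of
        # ('bar', 1)-style tuples.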
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import datetime
def app():
st.title('Syllabus')
    st.write('This page will show the graphs and tables based on the Faculty Participation in Syllabus')
data = st.file_uploader("Upload your relevant excel file")
    df = pd.read_csv(data)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Trading calendar handling
"""
__author__ = 'mayanqiong'
import os
from datetime import date, datetime
from typing import Union, List
import pandas as pd
import requests
rest_days_df = None
chinese_holidays_range = None
def _init_chinese_rest_days(headers=None):
global rest_days_df, chinese_holidays_range
if rest_days_df is None:
url = os.getenv("TQ_CHINESE_HOLIDAY_URL", "https://files.shinnytech.com/shinny_chinese_holiday.json")
rsp = requests.get(url, timeout=30, headers=headers)
chinese_holidays = rsp.json()
        _first_day = date(int(chinese_holidays[0].split('-')[0]), 1, 1)  # first day of the year containing the first holiday date
        _last_day = date(int(chinese_holidays[-1].split('-')[0]), 12, 31)  # last day of the year containing the last holiday date
chinese_holidays_range = (_first_day, _last_day)
rest_days_df = pd.DataFrame(data={'date': pd.Series(pd.to_datetime(chinese_holidays, format='%Y-%m-%d'))})
        rest_days_df['trading_restdays'] = False  # rest days (holidays) are marked False
return chinese_holidays_range
def _get_trading_calendar(start_dt: date, end_dt: date, headers=None):
"""
    Return whether each day in the given period is a trading day.
:return: DataFrame
date trading
2019-12-05 True
2019-12-06 True
2019-12-07 False
2019-12-08 False
2019-12-09 True
"""
_init_chinese_rest_days(headers=headers)
df = pd.DataFrame()
df['date'] = pd.Series(pd.date_range(start=start_dt, end=end_dt, freq="D"))
df['trading'] = df['date'].dt.dayofweek.lt(5)
    result = pd.merge(rest_days_df, df, sort=True, how="right", on="date")
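    # The merged frame has trading_restdays == False for listed holidays and NaN
    # elsewhere; the remainder of the function (not included in this excerpt)
    # presumably combines this with the weekday flag to build the final 'trading'
    # column.


# Editor's sketch (not part of the original module): a minimal, self-contained
# illustration of the merge-and-flag technique used above. The rest-day date is
# hypothetical.
if __name__ == "__main__":
    _rest = pd.DataFrame({"date": pd.to_datetime(["2021-05-03"]),
                          "trading_restdays": False})
    _days = pd.DataFrame({"date": pd.date_range("2021-05-01", "2021-05-05", freq="D")})
    _days["trading"] = _days["date"].dt.dayofweek.lt(5)  # weekdays only
    _merged = pd.merge(_rest, _days, sort=True, how="right", on="date")
    # A date is a trading day if it is a weekday and not an explicit rest day:
    _merged["trading"] = _merged["trading"] & _merged["trading_restdays"].isna()
    print(_merged[["date", "trading"]])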
import sys
sys.path.append("../utils")
import random
import time
from collections import Counter
from typing import Callable
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data
import torchvision
from data_prep import machine_translation
from modelsummary import summary
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader, TensorDataset
from transformers import BertModel, BertTokenizer, logging
from utils import (
ImbalancedDatasetSampler,
language_model_preprocessing,
translated_preprocessing,
)
from bertGRU import BERTGRUSentiment
from train_model import epoch_time, evaluate, train
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Device for training: {device}")
logging.set_verbosity_error()
########## HYPER-PARAMETERS ##########
BATCH_SIZE = 128
LEARNING_RATE = 0.0005
WEIGHT_DECAY = 0.1
N_EPOCHS = 100
SEED = 0
use_sampling = True
classes = 3
######################################
# Control sources of randomness
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
logging.set_verbosity_error()
# Load dataset
dataset = pd.read_csv("../data/dataset.csv", index_col=False, sep="\t")
"""Tests for the table of traded securities."""
from datetime import date
import pandas as pd
import pytest
from poptimizer.data import ports
from poptimizer.data.domain import events
from poptimizer.data.domain.tables import base, securities
from poptimizer.shared import col
TICKER_CASES = (
("1", 0),
("2", 1),
("D", 0),
("a", None),
)
@pytest.mark.parametrize("ticker, answer", TICKER_CASES)
def test_ticker_type(ticker, answer):
    """Check that the ticker corresponds to a common (ordinary) share."""
if answer is None:
with pytest.raises(securities.WrongTickerTypeError):
securities._ticker_type(ticker)
else:
assert securities._ticker_type(ticker) is answer
@pytest.fixture(scope="function", name="table")
def create_table():
    """Create an empty table for the tests."""
id_ = base.create_id(ports.SECURITIES)
return securities.Securities(id_)
def test_update_cond(table):
    """An update always occurs when an event arrives."""
assert table._update_cond(object())
@pytest.mark.asyncio
async def test_load_and_format_df(table, mocker):
    """Data is loaded and a market name column is added."""
fake_gateway = mocker.AsyncMock()
    fake_gateway.return_value = pd.DataFrame([[10]], columns=[col.TICKER_TYPE])
import logging
import itertools
import re
import copy
from pandas import DataFrame, concat
from scipy.spatial.distance import pdist, squareform
from pylie.model.liebase import LIEDataFrameBase
from pylie.methods.fileio import PDBParser, MOL2Parser, _open_anything
from pylie.methods.data import METALS, STRUCTURE_DATA_INFO
from pylie.methods.geometry import *
logger = logging.getLogger('pylie')
DEFAULT_CONTACT_COLUMN_NAMES = {'atnum': 'atnum',
'atname': 'atname',
'atalt': 'atalt',
'attype': 'attype',
'resname': 'resname',
'chain': 'chain',
'model': 'model',
'label': 'label',
'resnum': 'resnum',
'resext': 'resext',
'xcoor': 'xcoor',
'ycoor': 'ycoor',
'zcoor': 'zcoor',
'occ': 'occ',
'b': 'b',
'segid': 'segid',
'elem': 'elem',
'charge': 'charge',
'group': 'group'}
# Initiate chemical information dictionary as pandas DataFrame
cheminfo = DataFrame(STRUCTURE_DATA_INFO)
import pandas as pd
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import random
from sklearn import svm
from keras.optimizers import Adam
from keras.layers import LeakyReLU
from nltk.stem import WordNetLemmatizer
import operator
from textblob import TextBlob
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
import re
from wordcloud import WordCloud
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
class MBTI():
def __init__(self):
self.csv_path = "mbti_1.csv"
self.df = pd.read_csv(self.csv_path)
self.original_df = self.df.copy()
self.porter = PorterStemmer()
self.lancaster = LancasterStemmer()
self.lemmatizer = WordNetLemmatizer()
self.all_words = {}
def store_clean_df(self):
self.df.to_csv('clean.csv')
def load_clean_df(self):
self.df = pd.read_csv('clean.csv')
def transform_df(self):
# Transform the df into four different df - one for each subproblem (IE,JP,NS,TF)
transformed_df = self.df.copy()
transformed_df['posts'] = transformed_df['posts'].apply(lambda x: x.replace('|||', ''))
transformed_df['posts'] = transformed_df['posts'].apply(lambda x: ''.join([i for i in x if not i.isdigit()]))
counter = 0
print(transformed_df.size)
transformed_df['posts'] = transformed_df.apply(lambda row: nltk.word_tokenize(row['posts']), axis=1)
for row_posts in transformed_df['posts'].tolist():
print(counter)
print(row_posts)
counter+=1
for feature in row_posts:
try:
self.all_words[feature] += 1
except:
self.all_words[feature] = 0
print('Features found')
self.all_words = dict(sorted(self.all_words.items(), key=operator.itemgetter(1), reverse=True))
keys = list(self.all_words.keys())[:5000]
exists = {}
counter = 0
for word in keys:
counter +=1
print(counter)
exists[word] = []
for row_posts in transformed_df['posts'].tolist():
features = row_posts
exists[word].append(features.count(word))
for word in exists:
transformed_df[word]= exists[word]
del transformed_df['type']
del transformed_df['posts']
IE_df = transformed_df.copy()
del IE_df['JP']
del IE_df['TF']
del IE_df['NS']
del IE_df['Unnamed: 0']
JP_df = transformed_df.copy()
del JP_df['IE']
del JP_df['TF']
del JP_df['NS']
del JP_df['Unnamed: 0']
TF_df = transformed_df.copy()
del TF_df['JP']
del TF_df['IE']
del TF_df['NS']
del TF_df['Unnamed: 0']
NS_df = transformed_df.copy()
del NS_df['JP']
del NS_df['IE']
del NS_df['TF']
del NS_df['Unnamed: 0']
print('Finished')
return IE_df, JP_df, TF_df, NS_df
def post_cleaner(self, post):
post = post.lower()
post = re.sub(
r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''',
'', post, flags=re.MULTILINE)
puncs1 = ['@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '_', '+', '=', '{', '}', '[', ']', '\\', '"',
"'", ';', ':', '<', '>', '/']
for punc in puncs1:
post = post.replace(punc, '')
puncs2 = [',', '.', '?', '!', '\n']
for punc in puncs2:
post = post.replace(punc, ' ')
post = re.sub('\s+', ' ', post).strip()
return post
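    # Editor's illustration (approximate, not in the original source):
    # post_cleaner("Check THIS: https://t.co/xyz !!") lowercases the text, strips
    # the URL and the punctuation, and collapses whitespace, returning roughly
    # "check this".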
def perform_eda(self):
# ++++++ Print information and description of the data
#print("+++++++++++ self.df.info:")
print(self.df.info())
types = self.df.type.tolist()
pd.Series(types).value_counts().plot(kind="bar")
plt.savefig("plot1.png")
def stemSentence(self, sentence):
token_words = word_tokenize(sentence)
stem_sentence = []
for word in token_words:
stem_sentence.append(self.lemmatizer.lemmatize(word))
stem_sentence.append(" ")
return "".join(stem_sentence)
def prepare_df(self):
posts = self.df.posts.tolist()
#clean
posts = [self.post_cleaner(post) for post in posts]
#lemmatize
posts = [self.stemSentence(post) for post in posts]
self.df['posts'] = posts
#print(self.df.head(1))
# Create 4 more columns for binary classification - LABEL ENCODING, ONE-HOT ENCODING
map1 = {"I": 0, "E": 1}
map2 = {"N": 0, "S": 1}
map3 = {"T": 0, "F": 1}
map4 = {"J": 0, "P": 1}
self.df['IE'] = self.df['type'].astype(str).str[0]
self.df['IE'] = self.df['IE'].map(map1)
self.df['NS'] = self.df['type'].astype(str).str[1]
self.df['NS'] = self.df['NS'].map(map2)
self.df['TF'] = self.df['type'].astype(str).str[2]
self.df['TF'] = self.df['TF'].map(map3)
self.df['JP'] = self.df['type'].astype(str).str[3]
self.df['JP'] = self.df['JP'].map(map4)
def add_features(self):
# Add new features, such as words per comment, links per comment, images per comment...
self.df['ellipsis_per_comment'] = self.df['posts'].apply(lambda x: x.count('...') / (x.count("|||") + 1))
self.df['words_per_comment'] = self.df['posts'].apply(lambda x: x.count(' ') / (x.count("|||") + 1))
self.df['words'] = self.df['posts'].apply(lambda x: x.count(' '))
self.df['link_per_comment'] = self.df['posts'].apply(lambda x: x.count('http') / (x.count("|||") + 1))
self.df['smiles_per_comment'] = self.df['posts'].apply(lambda x: (x.count(':-)') + x.count(':)') + x.count(':-D') + x.count(':D')) / (x.count("|||") + 1))
self.df['sad'] = self.df['posts'].apply(lambda x: (x.count(':(') + x.count('):') ) / (x.count("|||") + 1))
self.df['heart'] = self.df['posts'].apply(lambda x: x.count('<3') / (x.count("|||") + 1))
self.df['smiling'] = self.df['posts'].apply(lambda x: x.count(';)') / (x.count("|||") + 1))
self.df['exclamation_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("!") / (x.count("|||") + 1))
self.df['question_mark_per_comment'] = self.df['posts'].apply(lambda x: x.count("?") / (x.count("|||") + 1))
self.df['polarity'] = self.df['posts'].apply(lambda x: TextBlob(x).sentiment.polarity)
def plot(self):
# Plot each category to see if it is balanced - We observe that IE and NS are fairly imbalanced.
binary1 = self.df.IE.tolist()
pd.Series(binary1).value_counts().plot(kind="bar", title="0=I, 1=E")
# plt.show()
plt.savefig("IE.png")
binary1 = self.df.NS.tolist()
pd.Series(binary1).value_counts().plot(kind="bar", title="0=N, 1=S")
# plt.show()
plt.savefig("NS.png")
binary1 = self.df.TF.tolist()
pd.Series(binary1).value_counts().plot(kind="bar", title="0=T, 1=F")
# plt.show()
plt.savefig("TF.png")
binary1 = self.df.JP.tolist()
pd.Series(binary1).value_counts().plot(kind="bar", title="0=J, 1=P")
# plt.show()
plt.savefig("JP.png")
# PLOT 2
plt.figure(figsize=(15, 10))
sns.swarmplot("type", "words_per_comment", data=self.df)
plt.savefig("plot2.png")
# PLOT 3
plt.figure(figsize=(15, 10))
sns.jointplot("variance_of_word_counts", "words_per_comment", data=self.df, kind="hex")
# plt.show()
plt.savefig("plot3.png")
def wordcloud(self):
fig, ax = plt.subplots(len(self.df['type'].unique()), sharex=True, figsize=(15,10*len(self.df['type'].unique())))
k = 0
for i in self.df['type'].unique():
df_4 = self.df[self.df['type'] == i]
wordcloud = WordCloud().generate(df_4['posts'].to_string())
ax[k].imshow(wordcloud)
ax[k].set_title(i)
ax[k].axis("off")
k+=1
wordcloud.to_file('N.png')
def create_clean_df(self):
self.perform_eda()
self.add_features()
self.prepare_df()
self.store_clean_df()
def create_transformed_df(self):
self.load_clean_df()
IE_df, JP_df, TF_df, NS_df = self.transform_df()
IE_df.to_csv('IE_df.csv')
JP_df.to_csv('JP_df.csv')
TF_df.to_csv('TF_df.csv')
NS_df.to_csv('NS_df.csv')
def remove_bars(self):
self.df['posts'] = self.df['posts'].apply(lambda x: x.replace('|||', ''))
def svm(self):
IE_df = pd.read_csv('IE_df.csv')
y = IE_df['IE']
del IE_df['IE']
x_train, x_test, y_train, y_test = train_test_split(IE_df, y, test_size=0.20, random_state=1, stratify=y)
IE_accuracy = self.perform_svm(x_train, x_test, y_train, y_test)
print('IE')
print(IE_accuracy)
JP_df = pd.read_csv('JP_df.csv')
y = JP_df['JP']
del JP_df['JP']
x_train, x_test, y_train, y_test = train_test_split(JP_df, y, test_size=0.20, random_state=1, stratify=y)
JP_accuracy = self.perform_svm(x_train, x_test, y_train, y_test)
print('JP')
print(JP_accuracy)
        TF_df = pd.read_csv('TF_df.csv')
# Import the required packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import streamlit as st
import pickle
from pickle import load
from PIL import Image
import seaborn as sns
import statsmodels.api as sm
import lime.lime_tabular
from sklearn.model_selection import train_test_split
import string
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
# Set Recursion Limit
import sys
sys.setrecursionlimit(40000)
import re
import nltk
import regex as re
from nltk.corpus import stopwords
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import lightgbm as lgb
from lightgbm import LGBMClassifier
import streamlit.components.v1 as components
import tweepy
from collections import Counter
from wordcloud import WordCloud
import datetime
import plotly.express as px
import time
import pydeck as pdk
import SessionState # Assuming SessionState.py lives on this folder
st.sidebar.title('Dashboard Control')
control = st.sidebar.radio('Navigation Bar', ('Home', 'Live Tweet Feed', 'Time Series Analysis', 'XAI'))
if control == 'Home':
### Sentiment Code goes here
st.markdown('<h1 style="color:#8D3DAF;text-align:center;font-family: Garamond, serif;"><b>RAKSHAK</b></h1>',unsafe_allow_html=True)
st.markdown('<h2 style="color:#E07C24;text-align:center;font-family: Georgia, serif;"><b>Time Series Sentiment Analysis Of Natural Hazard Relief Operations Through Social Media Data</b></h2>',unsafe_allow_html=True)
#st.markdown("The dashboard will help the government and humanitarian aid agencies to plan and coordinate the natural disaster relief efforts, resulting in more people being saved and more effective distribution of emergency supplies during a natural hazard")
st.header("Natural Hazard Data Collected Sample")
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
# Dataset Description
h = st.sidebar.slider('Select the number of tweets using the slider', 1, 100, 10)
data_tweets = tweets.sample(h)
data_tweets['index'] = list(range(0, h, 1))
data_tweets.set_index('index', inplace=True)
st.table(data_tweets)
# Checking for class balancing and get unique labels:
st.header("Count Of Tweets In Each Class")
chart_visual_class_balancing = st.sidebar.checkbox('Class Labels', True)
if chart_visual_class_balancing==True:
fig = plt.figure(figsize=(8, 4))
#sns.countplot(y=tweets.loc[:, 'type'],data=tweets).set_title("Count of tweets in each class")
fig = px.histogram(tweets, x="type",color="type",title="Count of tweets in each class")
st.plotly_chart(fig)
# Wordclouds
# Selection of Input & Output Variables
X = tweets.loc[:, 'text']
Y = tweets.loc[:, 'type']
X = list(X)
def preprocess_dataset(d):
# Define count variables
cnt=0
punctuation_count = 0
digit_count = 0
# Convert the corpus to lowercase
lower_corpus = []
for i in range(len(d)):
lower_corpus.append(" ".join([word.lower() for word in d[i].split()]))
# Remove any special symbol or punctuation
without_punctuation_corpus = []
for i in range(len(lower_corpus)):
p = []
for ch in lower_corpus[i]:
if ch not in string.punctuation:
p.append(ch)
else:
p.append(" ")
# Count of punctuation marks removed
punctuation_count += 1
x = ''.join(p)
if len(x) > 0:
without_punctuation_corpus.append(x)
# Remove urls with http, https or www and Retweets RT
without_url_corpus = []
for i in range(len(without_punctuation_corpus)):
text = without_punctuation_corpus[i]
text = re.sub(r"http\S*||www\S*", "", text)
text = re.sub(r"RT ", "", text)
without_url_corpus.append(text)
# Remove special characters and numbers from the corpus
without_digit_corpus = []
for i in range(len(without_url_corpus)):
p = []
for word in without_url_corpus[i].split():
if word.isalpha():
p.append(word)
else:
# Count of punctuation marks removed
digit_count += 1
x = ' '.join(p)
without_digit_corpus.append(x)
# Tokenize the corpus
# word_tokenize(s): Tokenize a string to split off punctuation other than periods
# With the help of nltk.tokenize.word_tokenize() method, we are able to extract the tokens
# from string of characters by using tokenize.word_tokenize() method.
# Tokenization was done to support efficient removal of stopwords
total_count = 0
tokenized_corpus = []
for i in without_digit_corpus:
tokenized_tweet = nltk.word_tokenize(i)
tokenized_corpus.append(tokenized_tweet)
# Count the length of tokenized corpus
total_count += len(list(tokenized_tweet))
# Remove Stopwords
stopw = stopwords.words('english')
count = 0
tokenized_corpus_no_stopwords = []
for i,c in enumerate(tokenized_corpus):
tokenized_corpus_no_stopwords.append([])
for word in c:
if word not in stopw:
tokenized_corpus_no_stopwords[i].append(word)
else:
count += 1
# lemmatization and removing words that are too large and small
lemmatized_corpus = []
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
ct = 0
cnt_final=0
dictt = {}
for i in range(0,len(tokenized_corpus_no_stopwords)):
lemmatized_corpus.append([])
for w in tokenized_corpus_no_stopwords[i]:
# lematizing only those words whose length >= 2 and <=10
# Considering words with length greater than or equal to 2 and less than or equal to 10
if(len(w)>2 and len(w)<=10):
lemmatized_corpus[i].append(lemmatizer.lemmatize(w))
cnt_final+=1
# Count of final corpus
# This is the length of total corpus that went through the process of lematization
ct+=1
############## Removing words of large and small length
# Doing a survey to find out the length of words so we can remove the too small and too large words from the Corpus
# plt.bar(*zip(*dictt.items()))
# plt.show()
# Punctuation Preprocessing
preprocessed_corpus = []
for i,c in enumerate(lemmatized_corpus):
preprocessed_corpus.append([])
for word in c:
x = ''.join([ch for ch in word if ch not in string.punctuation])
if len(x) > 0:
preprocessed_corpus[i].append(x)
# Clear unwanted data variables to save RAM due to memory limitations
del lower_corpus
del without_punctuation_corpus
del without_digit_corpus
del tokenized_corpus
del tokenized_corpus_no_stopwords
del lemmatized_corpus
return preprocessed_corpus
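    # Editor's illustration (approximate, not in the original source):
    # preprocess_dataset(["Need 20 beds in Kathmandu ASAP!!"]) lowercases the text,
    # removes punctuation/digits/URLs, drops stopwords, lemmatizes, and returns
    # roughly [['need', 'bed', 'kathmandu', 'asap']].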
# Preprocess the Input Variables
preprocessed_corpus = preprocess_dataset(X)
data_corpus = []
for i in preprocessed_corpus:
data_corpus.append(" ".join([w for w in i]))
# Creating a word cloud
st.header("Wordclouds For Dataset")
fig, axes = plt.subplots(1, 2)
# Worcloud for processed dataset
words1 = ' '.join([tweet for tweet in X])
words2 = ' '.join([tweet for tweet in data_corpus])
wordCloud1 = WordCloud(background_color ='black').generate(words1)
wordCloud2 = WordCloud(background_color ='black').generate(words2)
# Display the generated image:
axes[0].title.set_text("Raw Dataset")
axes[0].imshow(wordCloud1)
axes[0].axis("off")
axes[1].title.set_text("Processed Dataset")
axes[1].imshow(wordCloud2)
axes[1].axis("off")
st.pyplot(fig)
# Create most used hashtags
st.header("Top Hashtag Used in the Datasets")
fig, axes = plt.subplots(1, 3)
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
X1 = list(tweets1.loc[:, 'text'])
X2 = list(tweets2.loc[:, 'text'])
X3 = list(tweets3.loc[:, 'text'])
dc1 = []
pd1 = preprocess_dataset(X1)
for i in pd1:
dc1 += i
c1 = Counter(dc1)
mfw1 = c1.most_common(10)
df1 = pd.DataFrame(mfw1)
df1.columns = ['Word', 'Count']
axes[0] = px.line(df1, x='Word', y='Count',title='Nepal Earthquake 2015',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[0])
dc2 = []
pd2 = preprocess_dataset(X2)
for i in pd2:
dc2 += i
c2 = Counter(dc2)
mfw2 = c2.most_common(10)
df2 = pd.DataFrame(mfw2)
df2.columns = ['Word', 'Count']
axes[1] = px.line(df2, x='Word', y='Count',title='Italy Earthquake 2016', labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[1])
dc3 = []
pd3 = preprocess_dataset(X3)
for i in pd3:
dc3 += i
c3 = Counter(dc3)
mfw3 = c3.most_common(10)
df3 = pd.DataFrame(mfw3)
df3.columns = ['Word', 'Count']
axes[2] = px.line(df3, x='Word', y='Count',title='COVID-19',labels={'Word':'Hashtag', 'Count':'Number of Hashtag tweeted'})
st.plotly_chart(axes[2])
#df3.set_index('Word', inplace=True)
#axes[2].plot(df3['Count'], marker='o', linewidth=0.5,ls='solid', c='blue')
#axes[2].tick_params(axis ='x', rotation =-90)
#axes[2].set_xlabel('Hashtag')
#axes[2].set_ylabel('Number of Hashtag tweeted')
#axes[2].title.set_text("COVID-19")
st.header("Sentiments of Tweets Collected")
st.caption("Select Start & End Date to display Sentiments of tweets collected")
s_date = st.date_input("Start Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 1))
e_date = st.date_input("End Date", min_value=datetime.datetime(2021, 4, 1), max_value=datetime.datetime(2021, 4, 30), value=datetime.datetime(2021, 4, 30))
data = pd.read_csv('sentiment_april.csv')[['Need','Availability']]
data_T = data.T
date1 = int(str(s_date)[8:])-1
date2 = int(str(e_date)[8:])
data_T["sum"] = data_T[list(range(date1,date2,1))].sum(axis=1)
l_name = ['Need', 'Availability']
l_value = data_T['sum']
pie_dict = {'name': l_name, 'value': l_value}
pie_df = pd.DataFrame(pie_dict)
fig_pie = px.pie(pie_df, values='value', names='name', title='Sentiments of tweet collected between '+str(s_date)+' and '+str(e_date))
st.plotly_chart(fig_pie)
# Show locations for tweets
st.header("Map for Location of Each User")
df = pd.read_csv('lat-long.csv')
df.columns = ['lat', 'lon', 'country']
st.map(df)
elif control == 'Live Tweet Feed':
### Libe Tweet feed goes here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Live Tweet Feed</b></h1>',unsafe_allow_html=True)
st.header("Live Tweet Feed Sample")
    hashtag = str(st.text_input("Enter the keyword or hashtag for the live twitter feed", "#coronavirus"))
fetch_tweets = st.button("Fetch Tweets")
####input your credentials here
consumer_key = "IE5dmFVlYdg5aNrsNnZiXZVPa"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
if fetch_tweets:
# Current Time
current_time = time.time()
diff = 0
real_time = 0
live_tweet_text = []
live_tweet_date = []
live_tweet_id = []
lt_user_name = []
lt_user_location = []
lt_user_screenname=[]
lt_followers = []
lt_following = []
while(diff < 10):
for tweet in tweepy.Cursor(api.search_tweets,q=hashtag,count=10,lang="en",since="2021-12-11").items():
real_time = time.time()
diff = real_time - current_time
if diff >10:
break
if (not tweet.retweeted) and ('RT @' not in tweet.text):
#print(tweet,"\n\n\n\n\n")
live_tweet_text.append(tweet.text)
live_tweet_date.append(tweet.created_at)
live_tweet_id.append(tweet.id)
lt_user_name.append(tweet.user.name)
lt_user_location.append(tweet.user.location)
lt_user_screenname.append(tweet.user.screen_name)
lt_followers.append(str(tweet.user.followers_count))
lt_following.append(str(tweet.user.friends_count))
live_tweet_feed_dict = {'Tweet ID':live_tweet_id, 'Tweet': live_tweet_text, 'Date & Time': live_tweet_date, 'Username': lt_user_screenname, 'User Full Name': lt_user_name, 'Location': lt_user_location, 'Follower Count': lt_followers, 'Following Count': lt_following}
live_tweet_feed = pd.DataFrame(live_tweet_feed_dict)
st.dataframe(live_tweet_feed)
elif control == 'Time Series Analysis':
### Streamlit code starts here
st.markdown('<h1 style="color:#E07C24;;text-align:center;"><b>Time Series Analysis of Disaster Tweets</b></h1>',unsafe_allow_html=True)
### Time Series Code goes here
# Dataset
# Load the Dataset
tweets1 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/nepal_mix_1.csv")[['text','type']]
tweets2 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/italy_mix_1.csv")[['text','type']]
tweets3 = pd.read_csv("https://raw.githubusercontent.com/anidevhere/Temp/main/Covid-19.csv")[['text','type']]
names = [tweets1,tweets2,tweets3]
# Concatenate the datasets
tweets = pd.concat(names,ignore_index = True)
# Reshuffle the dataset
tweets = tweets.sample(frac = 1)
# Reindex the dataset
tweets['index'] = list(range(0,tweets.shape[0],1))
tweets.set_index('index', inplace=True)
tweets['type'] = tweets['type'].map({0: 'Need', 1: 'Availability', 2: 'Other'})
# Change column names for consistency
tweets.columns = ['text', 'type']
tweets['type'] = tweets['type'].map({'Need':0, 'Availability':1,'Other':2})
# Get all the labels used in the labelling column
label = tweets.type.unique()
print("Labels:", label)
# Remove label 2 from the list because not required for time series analysis
label = np.delete(label,np.where(label == 2))
print("Labels:", label)
# Add names to the numerical labels
label_name = []
for i in label:
if i == 0:
label_name.append("Need")
elif i == 1:
label_name.append("Availability")
# Choose interval
interval = 30
start_date = "2021-04-01"
# Create Timestamps with intervals
ds = pd.date_range(start=start_date, periods=interval)
dates = []
for i in ds:
dates.append(i.strftime('%m-%d-%Y'))
del ds
# Divide the Dataset into intervals
# Divide the dataset into the given number of intervals
num_of_tweets_per_interval = math.floor(tweets.shape[0]/interval)
# Create Time Series with intervals
data = []
count_of_data = []
for i in label:
count_of_data.append([])
for i in range(1,interval+1,1):
# Draw a sample from the tweets
tw = tweets.sample(n=num_of_tweets_per_interval, random_state=10, replace=False)
# Append the statistics of the drawn sample to the list
stat = dict()
for j in range(0,len(label)):
stat[label[j]] = list(tw['type']).count(label[j])
count_of_data[j].append(list(tw['type']).count(label[j]))
data.append(stat)
# Remove the already drawn tweets from the dataset
tweets.drop(labels=list(tw.index.values),inplace=True)
# Real Time Series starts here
# Load Dataset
    df = pd.DataFrame(count_of_data)
import logging
import multiprocessing as mp
from multiprocessing.pool import Pool
import pandas as pd
import numpy as np
from ..algorithms import Recommender
from .. import util
_logger = logging.getLogger(__name__)
_rec_context = None
class MPRecContext:
def __init__(self, algo, candidates, size):
self.algo = algo
self.candidates = candidates
self.size = size
def __enter__(self):
global _rec_context
_logger.debug('installing context for %s', self.algo)
_rec_context = self
return self
def __exit__(self, *args, **kwargs):
global _rec_context
_logger.debug('uninstalling context for %s', self.algo)
_rec_context = None
def _recommend_user(algo, user, n, candidates):
_logger.debug('generating recommendations for %s', user)
watch = util.Stopwatch()
res = algo.recommend(user, n, candidates)
_logger.debug('%s recommended %d/%d items for %s in %s', algo, len(res), n, user, watch)
res['user'] = user
res['rank'] = np.arange(1, len(res) + 1)
return res
def _recommend_seq(algo, users, n, candidates):
if isinstance(candidates, dict):
candidates = candidates.get
algo = Recommender.adapt(algo)
results = [_recommend_user(algo, user, n, candidates(user))
for user in users]
return results
def _recommend_worker(user):
candidates = _rec_context.candidates(user)
algo = Recommender.adapt(_rec_context.algo)
res = _recommend_user(algo, user, _rec_context.size, candidates)
return res.to_msgpack()
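    # The result frame is serialized with msgpack so it can be returned cheaply from a
    # worker process; recommend() deserializes it again with pd.read_msgpack below.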
def recommend(algo, users, n, candidates, ratings=None, nprocs=None):
"""
Batch-recommend for multiple users. The provided algorithm should be a
:py:class:`algorithms.Recommender` or :py:class:`algorithms.Predictor` (which
will be converted to a top-N recommender).
Args:
algo: the algorithm
        nprocs(int): the number of worker processes to use for parallel recommendation (optional)
users(array-like): the users to recommend for
n(int): the number of recommendations to generate (None for unlimited)
candidates:
the users' candidate sets. This can be a function, in which case it will
be passed each user ID; it can also be a dictionary, in which case user
IDs will be looked up in it.
ratings(pandas.DataFrame):
if not ``None``, a data frame of ratings to attach to recommendations when
available.
Returns:
A frame with at least the columns ``user``, ``rank``, and ``item``; possibly also
``score``, and any other columns returned by the recommender.
"""
if nprocs and nprocs > 1 and mp.get_start_method() == 'fork':
_logger.info('starting recommend process with %d workers', nprocs)
with MPRecContext(algo, candidates, n), Pool(nprocs) as pool:
results = pool.map(_recommend_worker, users)
results = [pd.read_msgpack(r) for r in results]
else:
_logger.info('starting sequential recommend process')
results = _recommend_seq(algo, users, n, candidates)
    results = pd.concat(results, ignore_index=True)
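# Hypothetical usage sketch (not part of the original module); `my_algo`, `train_ratings`,
# `users` and `all_items` are illustrative placeholders:
#
#     algo = my_algo.fit(train_ratings)              # any Predictor/Recommender instance
#     candidates = {u: all_items for u in users}     # per-user candidate sets
#     recs = recommend(algo, users, 10, candidates)
#     # -> frame with at least the columns: user, rank, item (plus score when available)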
# SageMaker deployment -- Final project (PyTorch and SageMaker)
#
# Pure Python version (no Jupyter notebook) corresponding
# to SageMakekr Project.ipynb.
# Non AWS version for quick prototyping.
#
#
# %% Get the data
# Only execute that once.
# !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
# %% Prepare and process the data (1/6)
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
data = {}
labels = {}
for data_type in ['train', 'test']:
data[data_type] = {}
labels[data_type] = {}
for sentiment in ['pos', 'neg']:
data[data_type][sentiment] = []
labels[data_type][sentiment] = []
path = os.path.join(data_dir, data_type, sentiment, '*.txt')
files = glob.glob(path)
for f in files:
with open(f) as review:
data[data_type][sentiment].append(review.read())
# Here we represent a positive review by '1' and a negative review by '0'
labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)
assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \
"{}/{} data size does not match labels size".format(data_type, sentiment)
return data, labels
# %% Prepare and process (2/6)
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
len(data['train']['pos']), len(data['train']['neg']),
len(data['test']['pos']), len(data['test']['neg'])))
# %% Prepare and process (3/6)
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
"""Prepare training and test sets from IMDb movie reviews."""
#Combine positive and negative reviews and labels
data_train = data['train']['pos'] + data['train']['neg']
data_test = data['test']['pos'] + data['test']['neg']
labels_train = labels['train']['pos'] + labels['train']['neg']
labels_test = labels['test']['pos'] + labels['test']['neg']
#Shuffle reviews and corresponding labels within training and test sets
data_train, labels_train = shuffle(data_train, labels_train)
data_test, labels_test = shuffle(data_test, labels_test)
    # Return a unified training data, test data, training labels, test labels
return data_train, data_test, labels_train, labels_test
# %% Prepare and process (4/6)
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
# %% Prepare and process (5/6)
print(train_X[100])
print(train_y[100])
# %% Prepare and process (6/6)
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
import re
from bs4 import BeautifulSoup
def review_to_words(review):
nltk.download("stopwords", quiet=True)
stemmer = PorterStemmer()
text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
words = text.split() # Split string into words
words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords
words = [PorterStemmer().stem(w) for w in words] # stem
return words
# %% TODO: Apply review_to_words to a review (train_X[100] or any other review)
print(review_to_words(train_X[100]))
# Answer to question:
# Apart from removing the HTML tags and performing stemming,
# the review_to_words also removes
# special characters such as punctuation and English
# stopwords such as "I", "me", "they", "but", etc.
# Furthermore, it converts the entire text to lowercase.
# %% Perform actual preprocessing and store locally in the cache
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files
os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
"""Convert each review to words; read from cache if available."""
# If cache_file is not None, try to read from it first
cache_data = None
if cache_file is not None:
try:
with open(os.path.join(cache_dir, cache_file), "rb") as f:
cache_data = pickle.load(f)
print("Read preprocessed data from cache file:", cache_file)
except:
pass # unable to read from cache, but that's okay
# If cache is missing, then do the heavy lifting
if cache_data is None:
# Preprocess training and test data to obtain words for each review
#words_train = list(map(review_to_words, data_train))
#words_test = list(map(review_to_words, data_test))
words_train = [review_to_words(review) for review in data_train]
words_test = [review_to_words(review) for review in data_test]
# Write to cache file for future runs
if cache_file is not None:
cache_data = dict(words_train=words_train, words_test=words_test,
labels_train=labels_train, labels_test=labels_test)
with open(os.path.join(cache_dir, cache_file), "wb") as f:
pickle.dump(cache_data, f)
print("Wrote preprocessed data to cache file:", cache_file)
else:
# Unpack data loaded from cache file
words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
return words_train, words_test, labels_train, labels_test
# Preprocess data
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
# %% Transform the data
import numpy as np
def build_dict(data, vocab_size = 5000):
"""Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer."""
# TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a
# sentence is a list of words.
all_words = [word for sentence in data for word in sentence]
all_words_set = set(all_words)
# A dict storing the words that appear in the reviews along with how often they occur
word_count = {w:0 for w in all_words_set}
for word in all_words:
word_count[word] += 1
# TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and
# sorted_words[-1] is the least frequently appearing word.
word_count_pairs = [(k,v) for k,v in word_count.items()]
word_count_pairs_sorted = sorted(word_count_pairs, key=lambda pair: pair[1], reverse=True)
sorted_words = [k for k, v in word_count_pairs_sorted]
print(sorted_words[:10])
word_dict = {} # This is what we are building, a dictionary that translates words into integers
for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'
word_dict[word] = idx + 2 # 'infrequent' labels
return word_dict
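# Note on build_dict: indices 0 and 1 are reserved for the 'no word' padding token and
# for infrequent words, so the most common word maps to 2, the next to 3, and so on.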
# %% Build the dictionary
word_dict = build_dict(train_X)
# Answer to question
word_dict_reverse={idx:w for w,idx in word_dict.items()}
for ii in range(5):
    print(f'{word_dict_reverse[ii + 2]}')
# The five most frequently appearing words are
# movi
# film
# one
# like
# time
#
# Given that we are dealing with movie (or film) reviews, this
# seems to make perfect sense.
# It seems to cover typical reviews such as
# 'This movie is one of the best in all time', or
# 'That film is like one of the worst ever'.
#
# As we removed stopwords, we do not find common words such
# as "the" leading this list.
#
# %% Save word dict
data_dir = '../data/pytorch' # The folder we will use for storing data
if not os.path.exists(data_dir): # Make sure that the folder exists
os.makedirs(data_dir)
with open(os.path.join(data_dir, 'word_dict.pkl'), "wb") as f:
pickle.dump(word_dict, f)
# %% Transform the reviews
def convert_and_pad(word_dict, sentence, pad=500):
NOWORD = 0 # We will use 0 to represent the 'no word' category
INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict
working_sentence = [NOWORD] * pad
for word_index, word in enumerate(sentence[:pad]):
if word in word_dict:
working_sentence[word_index] = word_dict[word]
else:
working_sentence[word_index] = INFREQ
return working_sentence, min(len(sentence), pad)
def convert_and_pad_data(word_dict, data, pad=500):
result = []
lengths = []
for sentence in data:
converted, leng = convert_and_pad(word_dict, sentence, pad)
result.append(converted)
lengths.append(leng)
return np.array(result), np.array(lengths)
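# Illustrative example (hypothetical toy inputs): with word_dict = {'movi': 2, 'great': 3},
# convert_and_pad(word_dict, ['great', 'movi', 'unseenword'], pad=5) returns
# ([3, 2, 1, 0, 0], 3): unknown words map to INFREQ (1) and the remainder is zero padding.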
# %% Actual transforming
train_X, train_X_len = convert_and_pad_data(word_dict, train_X)
test_X, test_X_len = convert_and_pad_data(word_dict, test_X)
# %% Sanity check
# Use this cell to examine one of the processed reviews to make sure
# everything is working as intended.
review_reconstructed = [word_dict_reverse[i] for i in train_X[20,:] if i >= 2]
print('Reconstructed review')
print(review_reconstructed)
print(f'Sentiment is ' + ('pos' if train_y[20] else 'neg'))
# %% Question (same methods for training and testing data set)
# This is not a problem and in fact makes sense.
# We need to prepare the (user-written) input in a form that we
# can pass to our model. That actually needs to be done in the
# same way for training and testing.
# We do not augment data (which we would only do with training data),
# therefore it is not a problem if we apply it to the test data.
# As we convert all data separately, we are also not at risk
# of mixing any test data into our training data set.
# %% Upload the data to S3
# This cannot be done outside of AWS, but we can
# nevertheless save the data locally.
import pandas as pd
pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class ToDenseTransformer(BaseEstimator,TransformerMixin):
# here you define the operation it should perform
def transform(self, X, y=None, **fit_params):
return X.todense()
# just return self
def fit(self, X, y=None, **fit_params):
return self
class SelectColumnsTransfomer(BaseEstimator, TransformerMixin):
""" Select dataframe columns
"""
def __init__(self, columns=None, ravel=None):
if columns is None:
self.columns = []
elif type(columns) is not list:
self.columns = [columns]
else:
self.columns = columns
if ravel is None:
self.ravel = False
else:
self.ravel = ravel
def transform(self, X, **transform_params):
cpy_df = X[self.columns].copy()
if self.ravel:
return cpy_df.values.ravel()
else:
return cpy_df
def fit(self, X, y=None, **fit_params):
return self
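# Hypothetical usage sketch (not part of the original module); the column name, vectorizer
# and classifier below are illustrative placeholders:
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.feature_extraction.text import CountVectorizer
#     from sklearn.naive_bayes import GaussianNB
#
#     pipe = Pipeline([
#         ('select', SelectColumnsTransfomer(columns='text', ravel=True)),
#         ('vectorize', CountVectorizer()),
#         ('to_dense', ToDenseTransformer()),   # GaussianNB requires a dense matrix
#         ('clf', GaussianNB()),
#     ])
#     pipe.fit(train_df, train_df['label'])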
class DataframeFunctionTransformer(BaseEstimator, TransformerMixin):
"""
Apply an arbitrary function to a Dataframe column, as you would use a `map` funcion
"""
def __init__(self, column_name, func, none_treatment=None):
"""
:param column_name: the name of the dataframe column to which the function will be applied
:param func: the function object, e.g. lambda
:param none_treatment: what to do with NaN, Nones, etc. Default behaviour is to perform no
special treatment, i.e. the function itself should treat nulls. Other options: 'return_none',
returns the input itself in case it's null-lie (as per pd.isnull)
"""
self.column_name = column_name
self.func = func
self.none_treatment = none_treatment
def transform(self, in_df, **transform_params):
cpy_df = in_df.copy()
if self.column_name not in cpy_df.columns.values:
raise ValueError('Provided column name is not part of the dataframe: "{}" '.format(self.column_name))
if self.none_treatment is None:
cpy_df[self.column_name] = cpy_df[self.column_name].map(self.func)
elif self.none_treatment.upper() == "RETURN_NONE":
            cpy_df[self.column_name] = cpy_df[self.column_name].map(lambda x: x if pd.isnull(x) else self.func(x))
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from datetime import datetime
import json
import logging
import webbrowser
import numpy as np
import pandas as pd
from czsc.Data.data_fq import data_stock_to_fq
from czsc.Fetch.mongo import FACTOR_DATABASE
from czsc.Fetch.tdx import get_bar
from czsc.Indicator import IndicatorSet
from czsc.Utils.echarts_plot import kline_pro
from czsc.Utils.logs import util_log_info
from czsc.Utils.trade_date import TradeDate, util_get_real_date, util_get_next_day
from czsc.Utils.transformer import DataEncoder
def identify_direction(v1, v2):
if v1 > v2: # 前面几根可能都是包含,这里直接初始赋值-1,上升趋势为正数
direction = 1
else:
direction = -1
return direction
def update_fx(bars, new_bars: list, fx_list: list, trade_date: list):
"""更新分型序列
k线中有direction,fx中没有direction字段
分型记对象样例:
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': -1, 低点用—1表示
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': +1, 高点用+1表示
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
"""
assert len(bars) > 0
bar = bars[-1].copy()
if len(trade_date) > 1:
if TradeDate(bar['date']) < TradeDate(trade_date[-1]):
util_log_info('{} data is older than {} !'.format(bar['date'], trade_date[-1]))
return
trade_date.append(bar['date'])
# 第1根K线没有方向,不需要任何处理
if len(bars) < 2:
new_bars.append(bar)
return False
last_bar = new_bars[-1]
cur_h, cur_l = bar['high'], bar['low']
last_h, last_l, last_dt = last_bar['high'], last_bar['low'], last_bar['date']
# 处理过包含关系,只需要用一个值识别趋势
direction = identify_direction(cur_h, last_h)
# 第2根K线只需要更新方向
if len(bars) < 3:
bar.update(direction=direction)
new_bars.append(bar)
return False
last_direction = last_bar.get('direction')
# 没有包含关系,需要进行分型识别,趋势有可能改变
if (cur_h > last_h and cur_l > last_l) or (cur_h < last_h and cur_l < last_l):
new_bars.append(bar)
# 分型识别
if last_direction * direction < 0:
bar.update(direction=direction)
if direction < 0:
fx = {
"date": last_bar['date'],
"fx_mark": 1,
"value": last_bar['high'],
"fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间
"fx_end": bar['date'],
# "direction": bar['direction'],
}
else:
fx = {
"date": last_bar['date'],
"fx_mark": -1,
"value": last_bar['low'],
"fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间
"fx_end": bar['date'],
# "direction": bar['direction'],
}
fx_list.append(fx)
return True
bar.update(direction=last_direction + np.sign(last_direction))
return False
# 有包含关系,不需要进行分型识别,趋势不改变,direction数值增加
bar.update(direction=last_direction + np.sign(last_direction))
new_bars.pop(-1) # 有包含关系的前一根数据被删除,这里是个技巧
# 有包含关系,按方向分别处理,同时需要更新日期
if last_direction > 0:
if cur_h < last_h:
bar.update(high=last_h, date=last_dt)
if cur_l < last_l:
bar.update(low=last_l)
elif last_direction < 0:
if cur_l > last_l:
bar.update(low=last_l, date=last_dt)
if cur_h > last_h:
bar.update(high=last_h)
else:
logging.error('{} last_direction: {} is wrong'.format(last_dt, last_direction))
raise ValueError
new_bars.append(bar)
return False
class XdList(object):
"""存放线段"""
def __init__(self, bars, indicators, trade_date):
# 传入的是地址,不要修改
self.bars = bars
self.indicators = indicators
self.trade_date = trade_date
# item存放数据元素
self.xd_list = [] # 否则指向同一个地址
# 低级别的中枢
self.zs_list = []
self.sig_list = []
# next是低一级别的线段
self.next = None
# prev 指向高一级别的线段
self.prev = None
def __len__(self):
return len(self.xd_list)
def __getitem__(self, item):
return self.xd_list[item]
def __setitem__(self, key, value):
self.xd_list[key] = value
def append(self, value):
self.xd_list.append(value)
def update_zs(self):
"""
{
'zs_start': 进入段的起点
'zs_end': 离开段的终点
'ZG': 中枢高点,
'ZD': 中枢低点,
'GG': 中枢最低点,
'DD': 中枢最高点,
'xd_list': list[dict]
'location': 中枢位置
}
"""
xd_list = self.xd_list
if len(xd_list) < 3:
return False
zs_list = self.zs_list
if len(zs_list) < 1:
assert len(xd_list) < 4
zg = xd_list[0] if xd_list[0]['fx_mark'] > 0 else xd_list[1]
zd = xd_list[0] if xd_list[0]['fx_mark'] < 0 else xd_list[1]
zs = {
'ZG': zg,
'ZD': zd,
'GG': [zg], # 初始用list储存,记录高低点的变化过程,中枢完成时可能会回退
'DD': [zd], # 根据最高最低点的变化过程可以识别时扩散,收敛,向上还是向下的形态
'xd_list': xd_list[:2],
'weight': 1, # 记录中枢中段的数量
'location': 0, # 初始状态为0,说明没有方向, -1 表明下降第1个中枢, +2 表明上升第2个中枢
'real_loc': 0 # 除去只有一段的中枢
}
zs_list.append(zs)
return False
# 确定性的笔参与中枢构建
last_zs = zs_list[-1]
xd = xd_list[-2]
if TradeDate(last_zs['xd_list'][-1]['date']) >= TradeDate(xd['date']):
# 已经计算过中枢
return False
if xd['fx_mark'] > 0:
# 三卖 ,滞后,实际出现了一买信号
if xd['value'] < last_zs['ZD']['value']:
zs_end = last_zs['xd_list'].pop(-1)
if zs_end['date'] == last_zs['DD'][-1]['date']:
last_zs['DD'].pop(-1)
last_zs.update(
zs_end=zs_end,
weight=last_zs['weight'] - 1,
DD=last_zs['DD'],
real_loc=last_zs['real_loc'] + 1 if last_zs['weight'] == 2 else last_zs['real_loc']
)
zs = {
'zs_start': xd_list[-4],
'ZG': xd,
'ZD': zs_end,
'GG': [xd],
'DD': [zs_end],
'xd_list': [zs_end, xd],
'weight': 1,
'location': -1 if last_zs['location'] >= 0 else last_zs['location'] - 1,
'real_loc': -1 if last_zs['real_loc'] >= 0 else last_zs['real_loc'] - 1,
}
zs_list.append(zs)
return True
elif xd['value'] < last_zs['ZG']['value']:
last_zs.update(ZG=xd)
# 有可能成为离开段
elif xd['value'] > last_zs['GG'][-1]['value']:
last_zs['GG'].append(xd)
elif xd['fx_mark'] < 0:
# 三买,滞后,实际出现了一卖信号
if xd['value'] > last_zs['ZG']['value']:
zs_end = last_zs['xd_list'].pop(-1)
if zs_end['date'] == last_zs['GG'][-1]['date']:
last_zs['GG'].pop(-1)
last_zs.update(
zs_end=zs_end,
weight=last_zs['weight'] - 1,
GG=last_zs['GG'],
real_loc=last_zs['real_loc'] - 1 if last_zs['weight'] == 2 else last_zs['real_loc']
)
zs = {
'zs_start': xd_list[-4],
'ZG': zs_end,
'ZD': xd,
'GG': [zs_end],
'DD': [xd],
'xd_list': [zs_end, xd],
'weight': 1,
'location': 1 if last_zs['location'] <= 0 else last_zs['location'] + 1,
'real_loc': 1 if last_zs['real_loc'] <= 0 else last_zs['real_loc'] + 1,
}
zs_list.append(zs)
return True
elif xd['value'] > last_zs['ZD']['value']:
last_zs.update(ZD=xd)
# 有可能成为离开段
elif xd['value'] < last_zs['DD'][-1]['value']:
last_zs['DD'].append(xd)
else:
raise ValueError
last_zs['xd_list'].append(xd)
last_zs['weight'] = last_zs['weight'] + 1
return False
def update_xd_eigenvalue(self):
trade_date = self.trade_date
xd = self.xd_list[-1]
last_xd = self.xd_list[-2]
# xd.update(pct_change=(xd['value'] - last_xd['value']) / last_xd['value'])
#
start = trade_date.index(last_xd['date'])
end = trade_date.index(xd['date'])
kn = end - start + 1
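        # kn: number of bars spanned by this segment, inclusive of both endpoints; its sign
        # is applied through fx_mark below to encode the direction of the move.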
fx_mark = kn * np.sign(xd.get('fx_mark', xd.get('direction', 0)))
dif = self.indicators.macd[end]['dif']
macd = sum([x['macd'] for x in self.indicators.macd[start: end + 1] if fx_mark * x['macd'] > 0])
xd.update(fx_mark=fx_mark, dif=dif, macd=macd)
# xd.update(fx_mark=fx_mark, dif=dif, avg_macd=macd/kn)
def update_xd(self):
"""更新笔分型序列
分型记对象样例:
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': -8, 低点,负数,表示下降趋势持续的K线根数
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': 7, 高点, 正数,表示上升趋势持续的根数
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
"""
# 至少3根同类型分型才可能出现线段,最后1根bi不确定,因此最后一段也不确定
if self.next is None:
self.next = XdList(self.bars, self.indicators, self.trade_date)
bi_list = self.xd_list
xd_list = self.next
if len(bi_list) < 4:
return False
if len(xd_list) < 1:
# 线段不存在,初始化线段,找4个点的最高和最低点组成线段
bi_list = bi_list[:-1].copy()
bi_list = sorted(bi_list, key=lambda x: x['value'], reverse=False)
if TradeDate(bi_list[0]['date']) < TradeDate(bi_list[-1]['date']):
xd_list.append(bi_list[0])
xd_list.append(bi_list[-1])
else:
xd_list.append(bi_list[-1])
xd_list.append(bi_list[0])
xd_list.update_xd_eigenvalue()
return True
bi3 = bi_list[-3]
xd = bi_list[-1].copy()
last_xd = xd_list[-1]
xd2 = xd_list[-2]
# if xd['date'] > pd.to_datetime('2016-07-12'):
# print('test')
# 非分型结尾段,直接替换成分型, 没有新增段,后续不需要处理,同一个端点确认
if 'direction' in last_xd or xd['date'] == last_xd['date']:
xd_list[-1] = xd # 日期相等的情况是否已经在内存中修改过了?
xd_list.update_xd_eigenvalue()
return True
# assert xd['date'] > last_xd['date']
if TradeDate(xd['date']) <= TradeDate(last_xd['date']):
util_log_info('The {} quotes bar input maybe wrong!'.format(xd['date']))
if bi3['fx_mark'] > 0: # -1和-3笔的方向相同,-1笔由于是未确认笔,可能没有fx_mark字段
# 同向延续
if last_xd['fx_mark'] > 0 and xd['value'] > last_xd['value']:
xd_list[-1] = xd
xd_list.update_xd_eigenvalue()
return True
# 反向判断
elif last_xd['fx_mark'] < 0:
# 价格判断
if xd['value'] > xd2['value']:
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
# 出现三笔破坏线段,连续两笔,一笔比一笔高,寻找段之间的最高点
elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] > bi3['value']:
index = -5
bi = bi_list[index]
# # 连续两个高点没有碰到段前面一个低点
# try:
# if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \
# bi_list[index - 1]['value'] > bi3['value'] and \
# bi_list[index]['value'] > xd['value']:
# return False
# except Exception as err:
# pass
# # util_log_info('Last xd {}:{}'.format(last_xd['date'], err))
while TradeDate(bi['date']) > TradeDate(last_xd['date']):
if xd['value'] < bi['value']:
xd = bi
index = index - 2
bi = bi_list[index]
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
elif bi3['fx_mark'] < 0:
# 同向延续
if last_xd['fx_mark'] < 0 and xd['value'] < last_xd['value']:
xd_list[-1] = xd
xd_list.update_xd_eigenvalue()
return True
# 反向判断
elif last_xd['fx_mark'] > 0:
# 价格判断
if xd['value'] < xd2['value']:
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
# 出现三笔破坏线段,连续两笔,一笔比一笔低,将最低的一笔作为段的起点,避免出现最低点不是端点的问题
elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] < bi3['value']:
index = -5
bi = bi_list[index]
# 连续两个个低点没有碰到段前面一高低点
# try:
# if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \
# bi_list[index - 1]['value'] < bi3['value'] and \
# bi_list[index]['value'] < xd['value']:
# return False
# except Exception as err:
# pass
# # util_log_info('Last xd {}:{}'.format(last_xd['date'], err))
while TradeDate(bi['date']) > TradeDate(last_xd['date']):
if xd['value'] > bi['value']:
xd = bi
index = index - 2
bi = bi_list[index]
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
return False
def update_sig(self):
"""
        Called after a segment update; checks whether a buy point (买点) has appeared.
"""
if len(self.zs_list) < 1:
return False
zs = self.zs_list[-1]
xd = self.xd_list[-1]
xd_list = zs['xd_list'].copy()
if 'zs_start' in zs:
xd_list.insert(0, zs['zs_start'])
sig = {
'date': self.bars[-1]['date'],
'real_loc': zs['real_loc'],
'location': zs['location'],
'weight': zs['weight'],
# 'fx_mark': xd['fx_mark'],
# 'last_mark': last_xd['fx_mark'],
# 'time_ratio': abs(xd['fx_mark'] / last_xd['fx_mark']) * 100,
# 'pct_change': xd['pct_change'] * 100,
# 'macd': xd['macd'],
# 'avg_macd': xd['avg_macd'],
}
# if sig['date'] >= pd.to_datetime('2021-07-28'):
# print(sig['date'])
if xd['fx_mark'] > 0: # 上升趋势
# sig.update(GG_macd=zs['GG'][-1].get('macd', np.nan), GG_avg_macd=zs['GG'][-1].get('avg_macd', np.nan))
# if zs['location'] > 0 and zs.get('zs_start', False):
# sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd'])
sig.update(boll=self.indicators.boll[-1].get('UB', np.nan) / self.bars[-1]['high'] * 100 - 100)
if xd['value'] > zs['GG'][-1]['value']:
xd_mark = -1 # 如果weight=1, 背驰,有可能1卖
# resistance = np.nan
# support = zs['GG'][-1]['value'] / xd['value'] - 1
elif xd['value'] > zs['ZG']['value']:
xd_mark = -2 # 如果weight=1, 背驰,有可能2卖
# resistance = zs['GG'][-1]['value'] / xd['value'] - 1
# support = zs['ZG']['value'] / xd['value'] - 1
elif xd['value'] > zs['ZD']['value']:
if sig['weight'] == 1:
xd_mark = -2
else:
xd_mark = -2.5
# resistance = zs['ZG']['value'] / xd['value'] - 1
# support = zs['ZD']['value'] / xd['value'] - 1
elif xd['value'] > zs['DD'][-1]['value']:
xd_mark = -3 # 三卖
# resistance = zs['ZD']['value'] / xd['value'] - 1
# support = zs['DD'][-1]['value'] / xd['value'] - 1
else:
xd_mark = -4 # 三卖
# resistance = zs['DD'][-1]['value'] / xd['value'] - 1
# support = np.nan
elif xd['fx_mark'] < 0: # 下降趋势
# sig.update(DD_macd=zs['DD'][-1].get('macd', np.nan), DD_avg_macd=zs['DD'][-1].get('avg_macd', np.nan))
# if zs['location'] < 0 and zs.get('zs_start', False):
# sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd'])
sig.update(boll=100 - self.indicators.boll[-1].get('LB', np.nan) / self.bars[-1]['low'] * 100)
if xd['value'] > zs['GG'][-1]['value']: # >GG的情况不会出现,因为当3买没有确认时,离开段的最高点也归属于当前中枢
xd_mark = 4 # 三买
# resistance = np.nan
# support = zs['GG'][-1]['value'] / xd['value'] - 1
elif xd['value'] > zs['ZG']['value']:
xd_mark = 3
# resistance = zs['GG'][-1]['value'] / xd['value'] - 1
# support = zs['ZG']['value'] / xd['value'] - 1
elif xd['value'] > zs['ZD']['value']:
if sig['weight'] == 1:
xd_mark = 2
else:
xd_mark = 2.5
# resistance = zs['ZG']['value'] / xd['value'] - 1
# support = zs['ZD']['value'] / xd['value'] - 1
elif xd['value'] >= zs['DD'][-1]['value']: # 如果和中枢最低点的值相同,归为2买,因为段没有升级
xd_mark = 2 # 如果weight=1, 背驰,有可能2买
# resistance = zs['ZD']['value'] / xd['value'] - 1
# support = zs['DD'][-1]['value'] / xd['value'] - 1
else:
xd_mark = 1 # 如果weight=1, 背驰,有可能1买
# resistance = zs['DD'][-1]['value'] / xd['value'] - 1
# support = np.nan
else:
raise ValueError
# sig.update(xd_mark=xd_mark, support=support * 100, resistance=resistance * 100)
sig.update(xd_mark=xd_mark)
start_xd = xd_list[-1]
# 当前线段持续的时间和幅度,下跌趋势回撤的比例
sig.update(valueback=(self.bars[-1]['close'] / start_xd['value'] - 1) * 100)
sig.update(timeback=xd['fx_mark'])
if xd_mark in [3, -3, 4, -4]: # 3买卖点,macd指标比较没有意义
sig.update(start=start_xd['fx_start'], dif=0, macd=0)
self.sig_list.append(sig)
return
direction = np.sign(xd['fx_mark'])
xd_list.reverse()
# 寻找段的起点,比较背离,一般是中枢+进入段的最高点或者最点
for idx, _xd in enumerate(xd_list[1:]):
if idx % 2 == 0: # 同向段
if _xd['value'] * direction > xd['value'] * direction:
break
else:
if _xd['value'] * direction < start_xd['value'] * direction:
start_xd = _xd
# break
sig.update(start=start_xd['fx_start'])
index = xd_list.index(start_xd) - 1
if index < 0: # 只有当前一笔,无法比较
sig.update(dif=0, macd=0)
self.sig_list.append(sig)
return
cmp_xd = xd_list[index]
compare_dif = cmp_xd.get('dif')
compare_macd = cmp_xd.get('macd')
dif = xd.get('dif')
macd = xd.get('macd')
if compare_dif and dif:
if dif * direction > compare_dif * direction:
sig.update(dif=-1)
else:
sig.update(dif=1)
if compare_macd and macd:
if macd * direction > compare_macd * direction:
sig.update(macd=-1)
else:
sig.update(macd=1)
self.sig_list.append(sig)
def update(self):
self.update_zs()
# 计算对应买卖点
self.update_sig()
return self.update_xd()
def update_bi(new_bars: list, fx_list: list, bi_list: XdList, trade_date: list):
"""更新笔序列
笔标记对象样例:和分型标记序列结构一样
{
'date': Timestamp('2020-11-26 00:00:00'),
'code': code,
'fx_mark': 'd',
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'code': code,
'fx_mark': 'g',
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
return: True 笔的数据出现更新,包括新增笔或者笔的延续
"""
# 每根k线都要对bi进行判断
bar = new_bars[-1].copy()
if TradeDate(bar['date']) < TradeDate(trade_date[-1]):
# 包含的K线,不会改变bi的状态,不需要处理
return False
if len(fx_list) < 2:
return False
bi = fx_list[-1].copy()
# 没有笔时.最开始两个分型作为第一笔,增量更新时从数据库取出两个端点构成的笔时确定的
if len(bi_list) < 1:
bi2 = fx_list[-2].copy()
bi_list.append(bi2)
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return False
last_bi = bi_list[-1]
bar.update(value=bar['high'] if bar['direction'] > 0 else bar['low'])
# if bar['date'] > pd.to_datetime('2020-09-08'):
# print('error')
# k 线确认模式,当前K线的日期比分型K线靠后,说明进来的数据时K线
if TradeDate(bar['date']) > TradeDate(bi['fx_end']):
if 'direction' not in last_bi: # bi的结尾是分型
# 趋势延续替代,首先确认是否延续, 由于处理过包含,高低点可能不正确,反趋势的极值点会忽略
# 下一根继续趋势,端点后移,如果继续反趋势,该点忽略
# todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断
if (last_bi['fx_mark'] > 0 and bar['high'] > last_bi['value']) \
or (last_bi['fx_mark'] < 0 and bar['low'] < last_bi['value']):
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
try:
kn_inside = trade_date.index(bar['date']) - trade_date.index(last_bi['fx_end']) - 1
except:
print('error')
# todo 至少2根k线, 时间确认必须被和前一笔方向相反,会出现端点不是极值点的情况
if kn_inside > 1 and bar['direction'] * last_bi['fx_mark'] < 0:
# 寻找同向的第一根分型
index = -1
while TradeDate(bi['date']) > TradeDate(last_bi['date']):
if bar['direction'] * bi['fx_mark'] > 0:
break
index = index - 1
bi = fx_list[index]
if (bar['direction'] * bi['fx_mark'] > 0) \
and (np.sign(bar['direction']) * bar['value'] < bi['fx_mark'] * bi['value']):
bi['fx_end'] = bar['date'] # 影响似乎不大?
bi_list.append(bi)
else:
bi_list.append(bar)
bi_list.update_xd_eigenvalue()
return True
# 只有一个端点,没有价格确认
if len(bi_list) < 2:
return False
# 价格确认
# todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断
if (last_bi['fx_mark'] < 0 and bar['high'] > bi_list[-2]['value']) \
or (last_bi['fx_mark'] > 0 and bar['low'] < bi_list[-2]['value']):
bi_list.append(bar)
bi_list.update_xd_eigenvalue()
return True
else: # 原有未出现分型笔的延续
assert bar['direction'] * last_bi['direction'] > 0
# if bar['direction'] * last_bi['direction'] < 0:
# print('error')
# return False
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
return False
# 非分型结尾笔,直接替换成分型, 没有新增笔,后续不需要处理,同一个端点确认
if 'direction' in last_bi or bi['date'] == last_bi['date']:
bi_list[-1] = bi
bi_list.update_xd_eigenvalue()
return True
# fx_end处理,分型处理完后,因为分型确认滞后,所以还需要对fx_end 也就是当前K线进行处理,否则会出现缺失或者识别滞后的问题
# 由于时分型,只需要判断延续的问题,因此K线的方向要和上一笔一致
def handle_fx_end():
assert bar['date'] == bi['fx_end']
if bar['direction'] * last_bi['fx_mark'] < 0:
return False
if last_bi['fx_mark'] * bar['value'] > last_bi['fx_mark'] * last_bi['value']:
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
# 分型处理,连续高低点处理,只判断是否后移,没有增加笔
# bi的fx_mark不一定为+1或者-1,因为要用sign函数取符号
# todo 为什么用 and 连接两个 if 结果错误
if last_bi['fx_mark'] * bi['fx_mark'] > 0:
if np.sign(last_bi['fx_mark']) * last_bi['value'] < bi['fx_mark'] * bi['value']:
bi_list[-1] = bi
bi_list.update_xd_eigenvalue()
return True
else:
# 笔确认是条件1、时间破坏,两个不同分型间至少有一根K线,2、价格破坏,向下的一笔破坏了上一笔的低点
kn_inside = trade_date.index(bi['fx_start']) - trade_date.index(last_bi['fx_end']) - 1
if kn_inside > 0: # 两个分型间至少有1根k线,端点有可能不是高低点
index = -2
while TradeDate(fx_list[index]['date']) > TradeDate(last_bi['date']):
# 分析的fx_mark取值为-1和+1
if (bi['fx_mark'] * fx_list[index]['fx_mark'] > 0) \
and (bi['fx_mark'] * bi['value'] < fx_list[index]['fx_mark'] * fx_list[index]['value']):
bi = fx_list[index].copy()
# 分型结尾不变
bi['fx_end'] = fx_list[-1]['fx_end']
index = index - 1
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return True
# 只有一个端点,没有价格确认
if len(bi_list) < 2:
return False
# 价格确认
# todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断
if (bi['fx_mark'] > 0 and bi['value'] > bi_list[-2]['value']) \
or (bi['fx_mark'] < 0 and bi['value'] < bi_list[-2]['value']):
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return True
return handle_fx_end()
class CzscBase:
def __init__(self):
# self.freq = freq
# assert isinstance(code, str)
# self.code = code.upper()
self.trade_date = [] # 用来查找索引
self.bars = []
self.indicators = IndicatorSet(self.bars)
# self.indicators = None
self.new_bars = []
self.fx_list = []
self.xd_list = XdList(self.bars, self.indicators, self.trade_date) # bi作为线段的head
self.sig_list = []
def update(self):
# 有包含关系时,不可能有分型出现,不出现分型时才需要
self.indicators.update()
try:
update_fx(bars=self.bars, new_bars=self.new_bars, fx_list=self.fx_list, trade_date=self.trade_date)
except:
print('error')
if not update_bi(
new_bars=self.new_bars, fx_list=self.fx_list, bi_list=self.xd_list, trade_date=self.trade_date
):
return
# 新增确定性的笔才处理段
xd_list = self.xd_list
result = True
index = 0
while result:
result = xd_list.update()
# 计算对应买卖点
if len(xd_list.sig_list) > 0:
signal = xd_list.sig_list[-1]
# signal.update(xd=index)
# self.sig_list.append(signal)
if index == 0:
signal.update(xd=0)
self.sig_list.append(signal)
else:
# 有趋势或者中枢段升级
if xd_list.zs_list[-1]['location'] != 0 or xd_list.zs_list[-1]['weight'] > 7:
last_sig = self.sig_list[-1]
last_sig.update(xd=index, xd_mark=signal['xd_mark'])
last_sig['real_loc'] = signal['real_loc']
last_sig['location'] = signal['location']
last_sig['weight'] = signal['weight']
last_sig['valueback'] = signal['valueback']
last_sig['timeback'] = signal['timeback']
# if signal['xd_mark'] in [1, -1]:
last_sig['dif{}'.format(index)] = signal.get('dif')
last_sig['macd{}'.format(index)] = signal.get('macd')
# else:
# util_log_info('High level xd {} == low level xd {}'.format(index, index - 1))
temp_list = xd_list
xd_list = xd_list.next
xd_list.prev = temp_list
index = index + 1
# 必须实现,每次输入一个行情数据,然后调用update看是否需要更新
def on_bar(self, bar):
"""
输入数据格式
Index(['open', 'high', 'low', 'close', 'amount', 'volume', 'date', 'code'], dtype='object')
'date' 未 timestamp volume用来画图
"""
raise NotImplementedError
class CzscMongo(CzscBase):
def __init__(self, code='rul8', data=None, start=None, end=None, freq='day', exchange=None):
# 只处理一个品种
super().__init__()
self.code = code
self.freq = freq
self.exchange = exchange
# self._bi_list = fetch_future_bi_day(self.code, limit=2, format='dict')
self._bi_list = []
self.old_count = len(self._bi_list)
if len(self._bi_list) > 0:
# self.fx_list = self._bi_list
start = self._bi_list[-1]['fx_end']
elif start is None:
start = '1990-01-01'
if data is None:
self.data = get_bar(code, start=start, end=end, freq=freq, exchange=exchange)
# self.data = get_bar(code, start, end='2020-12-09', freq=freq, exchange=exchange)
else:
self.data = data
def draw(self, chart_path=None):
if len(self.bars) < 1:
return
chart = kline_pro(
kline=self.bars, fx=self.fx_list,
bs=[], xd=self.xd_list,
# title=self.code + '_' + self.freq, width='1520px', height='580px'
title=self.code + '_' + self.freq, width='2540px', height='850px'
)
if not chart_path:
chart_path = 'E:\\signal\\{}_{}.html'.format(self.code, self.freq)
chart.render(chart_path)
webbrowser.open(chart_path)
def on_bar(self, bar):
"""
        bar format:
        'date' defaults to a Timestamp and is mainly used by the plotting function
"""
bar = bar.to_dict()
# if 'trade' in bar:
# bar['vol'] = bar.pop('trade')
# bar['date'] = pd.to_datetime(bar['date'])
self.bars.append(bar)
try:
self.update()
except Exception as error:
util_log_info(error)
def run(self, start=None, end=None):
if self.data is None or self.data.empty:
util_log_info('{} {} quote data is empty'.format(self.code, self.freq))
return
self.data.apply(self.on_bar, axis=1)
# self.save()
def save(self, collection=FACTOR_DATABASE.future_bi_day):
try:
logging.info('Now Saving Future_BI_DAY==== {}'.format(str(self.code)))
code = self.code
old_count = self.old_count
new_count = len(self._bi_list)
# 更新的数据,最后一个数据是未确定数据
update_count = new_count - old_count
if update_count < 2:
return
bi_list = self._bi_list[old_count:new_count - 1]
start = bi_list[0]['date']
end = bi_list[-1]['date']
logging.info(
'UPDATE_Future_BI_DAY \n Trying updating {} from {} to {}'.format(code, start, end),
)
collection.insert_many(bi_list)
except Exception as error:
print(error)
def save_sig(self, collection=FACTOR_DATABASE.czsz_sig_day):
try:
logging.info('Now Saving CZSC_SIG_DAY==== {}'.format(str(self.code)))
code = self.code
xd = self.xd_list
index = 0
sig = []
while xd:
df = pd.DataFrame(xd.sig_list)
df['xd'] = index
df['code'] = code
df['exchange'] = self.exchange
sig.append(df)
xd = xd.next
index = index + 1
sig_df = pd.concat(sig).set_index(['date', 'xd']).sort_index()
old_count = self.old_count
new_count = len(self._bi_list)
# 更新的数据,最后一个数据是未确定数据
update_count = new_count - old_count
if update_count < 2:
return
bi_list = self._bi_list[old_count:new_count - 1]
start = bi_list[0]['date']
end = bi_list[-1]['date']
logging.info(
'UPDATE_Future_BI_DAY \n Trying updating {} from {} to {}'.format(code, start, end),
)
collection.insert_many(bi_list)
except Exception as error:
print(error)
def to_csv(self):
if len(self.sig_list) < 1:
return
sig_df = pd.DataFrame(self.sig_list).set_index('date')
filename = 'E:\\signal\\{}_{}_{}.csv'.format(self.code, self.freq, sig_df.index[-1].strftime('%Y-%m-%d'))
sig_df.to_csv(filename)
def to_df(self):
xd = self.xd_list
index = 0
sig = []
while xd:
df = pd.DataFrame(xd.sig_list)
df['xd'] = index
df['code'] = self.code
df['exchange'] = self.exchange
sig.append(df)
xd = xd.next
index = index + 1
try:
sig_df = pd.concat(sig).set_index(['date', 'xd']).sort_index()
return sig_df
except:
util_log_info("{} signal is empty!".format(self.code))
return pd.DataFrame()
def to_json(self):
xd = self.xd_list
if len(xd) < 1:
return
index = 0
data = []
while xd:
data.append(
{
'xd{}'.format(index): xd.xd_list,
'zs{}'.format(index): xd.zs_list,
'sig{}'.format(index): xd.sig_list
}
)
xd = xd.next
index = index + 1
with open("{}_{}.json".format(self.code, self.freq), "w") as write_file:
json.dump(data, write_file, indent=4, sort_keys=True, cls=DataEncoder)
def calculate_bs_signals(security_df: pd.DataFrame, last_trade_date=None):
sig_list = []
if len(security_df) < 1:
util_log_info("=============Security list is empty!==========")
return
class_name = security_df.iloc[0]['class']
if last_trade_date is None:
last_trade_date = util_get_real_date(datetime.today().strftime('%Y-%m-%d'))
# last_trade_time = pd.to_datetime(util_get_next_day(last_trade_date))
last_trade_date = pd.to_datetime(last_trade_date)
index = 0
for code, item in security_df.iterrows():
exchange = item['exchange']
util_log_info("============={} {} Signal==========".format(code, exchange))
try:
hq = get_bar(code, end=last_trade_date, freq='day', exchange=exchange)
except:
util_log_info("============={} {} read hq incorrectly!==========".format(code, exchange))
continue
# future流动性过滤,成交量过滤
if class_name == 'future':
amount = hq.iloc[-1]['volume']
if amount < 10000:
util_log_info(
"===={} {} volume is few!====".format(code, exchange)
)
continue
else:
amount = hq.iloc[-1]['amount']
# 可转债价格过滤
if class_name == 'convertible':
if hq.iloc[-1]['close'] > 130:
util_log_info(
"===={} {} price is too high!====".format(code, exchange)
)
continue
if amount < 1000000: # 100万
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
elif class_name == 'hkconnect':
try:
amount = hq.iloc[-1]['hk_stock_amount']
except:
util_log_info(
"===={} {} KeyError: 'hk_stock_amount'====".format(code, exchange)
)
amount = hq.iloc[-1]['volume'] * hq.iloc[-1]['close'] * 100
if amount < 10000000: # 1000万
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
else:
if amount < 10000000: # 1000万
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
try:
# 复权处理
if class_name in ['stock', 'ETF']:
hq = data_stock_to_fq(hq, fqtype='qfq')
czsc_day = CzscMongo(code=code, data=hq, freq='day', exchange=exchange)
except Exception as error:
util_log_info("{} : {}".format(code, error))
continue
if len(czsc_day.data) < 1:
util_log_info("==========={} {} 0 Quotes==========".format(code, exchange))
continue
if czsc_day.data.iloc[-1]['date'] < last_trade_date:
util_log_info(
"=={} {} last trade date {}==".format(
code, exchange, czsc_day.data.iloc[-1]['date'].strftime('%Y-%m-%d'))
)
continue
czsc_day.run()
sig_day_list = czsc_day.sig_list
if len(sig_day_list) < 1:
continue
last_day_sig = sig_day_list[-1]
if last_day_sig['date'] < last_trade_date:
util_log_info(
"===={} {} last Signal {}====".format(code, exchange, last_day_sig['date'].strftime('%Y-%m-%d'))
)
continue
# 笔中枢走势的起点,如果是上升趋势的买点,从当前中枢的最高点开始计算,如果是卖点,从上升趋势的起点开始
xd_list = czsc_day.xd_list
zs_list = xd_list.zs_list
if len(zs_list) < 1:
continue
xd_mark = last_day_sig['xd_mark']
if xd_mark < 0: # 只考虑做多
continue
# if xd_mark < 0:
# xd = zs_list[-1]['DD'][-1]
# else:
# xd = zs_list[-1]['GG'][-1]
#
# start = xd.get('fx_start')
last_day_sig.update(deviation=last_day_sig.get('dif') + last_day_sig.get('macd', 0))
for idx in range(1, last_day_sig['xd'] + 1):
dif = 0 if np.isnan(last_day_sig.get('dif{}'.format(idx))) else last_day_sig.get('dif{}'.format(idx))
macd = 0 if np.isnan(last_day_sig.get('macd{}'.format(idx))) else last_day_sig.get('macd{}'.format(idx))
deviation = last_day_sig.get('deviation', 0)
last_day_sig.update(deviation=deviation + dif + macd)
start = xd_list.sig_list[-1]['start']
hq = get_bar(code, start=start, end=last_trade_date, freq='5min', exchange=exchange)
# 复权处理
if class_name in ['stock', 'ETF']:
hq = data_stock_to_fq(hq, fqtype='qfq')
czsc_min = CzscMongo(code=code, data=hq, freq='5min', exchange=exchange)
try:
if len(czsc_min.data) < 1:
util_log_info("========={} {} 0 5min Quotes========".format(code, exchange))
continue
except:
util_log_info("========={} {} 5min Quotes file is not exists!========".format(code, exchange))
continue
if czsc_min.data.iloc[-1]['date'] < last_trade_date:
util_log_info(
"==Please Update {} {} 5min Quotes from {}==".format(
code, exchange, czsc_day.data.iloc[-1]['date'].strftime('%Y-%m-%d'))
)
continue
czsc_min.run()
sig_min_list = czsc_min.sig_list
if len(sig_min_list) < 1:
continue
last_min_sig = sig_min_list[-1]
if last_min_sig['date'] < last_trade_date:
continue
        df = pd.DataFrame(sig_min_list)
# coding=utf-8
# Author: <NAME>
# Date: April 06, 2020
#
# Description: Information on FPKM genes and how much it maps to protein coding genes
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
import unittest
import itertools
import os
import pandas as pd
import platform
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains)
from hpat.hiframes.rolling import supported_rolling_funcs
LONG_TEST = (int(os.environ['HPAT_LONG_ROLLING_TEST']) != 0
if 'HPAT_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
class TestRolling(unittest.TestCase):
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = hpat.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = hpat.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
            df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
import os
import pandas as pd
from subprocess import call
import re
benchmarks = ['Celecoxib rediscovery', 'Troglitazone rediscovery', 'Thiothixene rediscovery',
'Aripiprazole similarity', 'Albuterol similarity', 'Mestranol similarity', 'C11H24',
'C9H10N2O2PF2Cl', 'Median molecules 1', 'Median molecules 2', 'Osimertinib MPO',
'Fexofenadine MPO', 'Ranolazine MPO', 'Perindopril MPO', 'Amlodipine MPO', 'Sitagliptin MPO',
'Zaleplon MPO', 'Valsartan SMARTS', 'Scaffold Hop', 'Deco Hop']
selected_lengths = [1, 1, 1, 100, 100, 100, 159, 250, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
top_100_lengths = [100] * 20
def scrub_population_scores(nohup):
"""Use to remove population scores from nohup when they don't need to appear in graph"""
lines = open(nohup, 'r').readlines()
lines_out = []
for line in lines:
if 'Population mean' not in line:
lines_out.append(line)
open(nohup, 'w').writelines(lines_out)
def break_into_many(f_name, rule='selected_output_smiles'):
"""Separate output smiles into many files to use rd_filters"""
new_path = re.sub(os.path.basename(f_name), rule, f_name)
os.makedirs(new_path, exist_ok=True)
if rule == 'selected_output_smiles':
rules = selected_lengths
elif rule == 'top_100_output_smiles':
rules = top_100_lengths
else:
raise UnboundLocalError
with open(f_name, 'r') as f:
lines = f.readlines()
line_no = 0
for name, l in zip(benchmarks, rules):
try:
write_lines = ''.join(lines[line_no:line_no + l])
except IndexError:
print(f_name + ' failed at ' + name)
break
line_no += l
new_name = re.sub(' ', '_', name) + '_smiles.smi'
new_name = os.path.join(new_path, new_name)
with open(new_name, 'w') as w:
w.write(write_lines)
w.close()
f.close()
def describe_failures(f_name):
"""Use alert collection csv to figure out what rule set tripped a failure."""
    alerts = pd.read_csv('data/alert_collection.csv')
# -*- coding: utf-8 -*-
"""
__file__
preprocess.py
__description__
pre-processing the data:
- text cleaning
- merging synonyms
- stemming
- cleaning attribute
- building attribute_description
- extracting brand and size for products
__author__
<NAME>
"""
from __future__ import print_function
from nlp_utils import *
import cPickle
import pandas as pd
import project_params as pp
import sys
from spell_corr import spell_check_dict
import re
def prog():
print(".",end='')
sys.stdout.flush()
def longprog():
print("....",end='')
sys.stdout.flush()
def clean_attributes(df):
def cat_text(x):
res = '%s %s' % (x['name'], x['value'])
return res
df['attribute_description'] = list(df.apply(cat_text, axis=1)); prog()
remove_bullet = lambda x: re.sub(r'(bullet\d+)', r' ', x)
df['attribute_description'] = df['attribute_description'].map(remove_bullet); prog()
def has_size_attribute(x):
if ('height' in x) | ('width' in x) | ('length' in x) | ('depth' in x):
return True
else:
return False
df['has_size'] = df['name'].map(has_size_attribute); prog()
dfSize = df.loc[df.has_size, ['product_uid','value']]
df = df.drop(['has_size'],axis=1)
all_sizes = dfSize.groupby('product_uid').agg(lambda x : ' '.join(x))
indx = all_sizes.index.map(int)
dfSize = pd.DataFrame({'product_uid':list(indx), 'size_attribute':list(all_sizes['value'])})
prog()
dfBrand = df.loc[df['name'] == 'MFG Brand Name',['product_uid','value']].rename(columns={"value": "brand"})
dfBrand['brand']= dfBrand['brand'].map(lambda x: x.lower())
all_descr = df[['product_uid','attribute_description']].groupby('product_uid').agg(lambda x: ' '.join(x))
indx = all_descr.index.map(int)
prog()
df = pd.DataFrame({'product_uid':list(indx), 'attribute_description':list(all_descr['attribute_description'])})
df = pd.merge(df,dfSize,on='product_uid',how='left')
df = df.fillna(u'unknownsize')
df = pd.merge(df,dfBrand,on='product_uid',how='left')
df = df.fillna(u'unknownbrand')
return df
def extra_clean(word):
word = word.replace('kholerhighland', 'kohler highline')
word = word.replace('smart', ' smart ')
word = word.replace('residential', ' residential ')
word = word.replace('whirlpool', ' whirlpool ')
word = word.replace('alexandrea',' alexandria ')
word = word.replace('bicycle',' bicycle ')
word = word.replace('non',' non ')
word = word.replace('replacement',' replacement')
word = word.replace('mowerectrical', 'mow electrical')
word = word.replace('dishwaaher', 'dishwasher')
word = word.replace('fairfield',' fairfield ')
word = word.replace('hooverwindtunnel','hoover windtunnel')
word = word.replace('airconditionerwith','airconditioner with ')
word = word.replace('pfistersaxton', 'pfister saxton')
word = word.replace('eglimgton','ellington')
word = word.replace('chrome', ' chrome ')
word = word.replace('foot', ' foot ')
word = word.replace('samsung', ' samsung ')
word = word.replace('galvanised', ' galvanised ')
word = word.replace('exhaust', ' exhaust ')
word = word.replace('reprobramable', 'reprogramable')
word = word.replace('rackcloset', 'rack closet ')
word = word.replace('hamptonbay', ' hampton bay ')
word = word.replace('cadet', ' cadet ')
word = word.replace('weatherstripping', 'weather stripping')
word = word.replace('poyurethane', 'polyurethane')
word = word.replace('refrigeratorators','refrigerator')
word = word.replace('baxksplash','backsplash')
word = word.replace('inches',' inch ')
word = word.replace('conditioner',' conditioner ')
word = word.replace('landscasping',' landscaping ')
word = word.replace('discontinuedbrown',' discontinued brown ')
word = word.replace('drywall',' drywall ')
word = word.replace('carpet', ' carpet ')
word = word.replace('less', ' less ')
word = word.replace('tub', ' tub')
word = word.replace('tubs', ' tub ')
word = word.replace('marble',' marble ')
word = word.replace('replaclacemt',' replacement ')
word = word.replace('non',' non ')
word = word.replace('soundfroofing', 'sound proofing')
return word
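# The regex rules in str_clean_stem_lower below normalise units and dimensions before
# stemming; e.g. a query like "2 inches x 4 ft" ends up roughly as "2in. xby 4ft."
# (illustrative; exact whitespace may differ).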
def str_clean_stem_lower(s):
try:
s = s.lower()
s = extra_clean(s)
s = re.sub(r"(\w)\.([A-Z])", r"\1 \2", s)
s = re.sub(r"([0-9]+)( *)(inches|inch|in|')\.?", r"\1in. ", s)
s = re.sub(r"([0-9]+)( *)(foot|feet|ft|'')\.?", r"\1ft. ", s)
s = re.sub(r"([0-9]+)( *)(pounds|pound|lbs|lb)\.?", r"\1lb. ", s)
s = re.sub(r"([0-9]+)( *)(square|sq) ?\.?(feet|foot|ft)\.?", r"\1sq.ft. ", s)
s = re.sub(r"([0-9]+)( *)(gallons|gallon|gal)\.?", r"\1gal. ", s)
s = re.sub(r"([0-9]+)( *)(ounces|ounce|oz)\.?", r"\1oz. ", s)
s = re.sub(r"([0-9]+)( *)(centimeters|cm)\.?", r"\1cm. ", s)
s = re.sub(r"([0-9]+)( *)(milimeters|mm)\.?", r"\1mm. ", s)
s = re.sub(r"([0-9]+)( *)(degrees|degree)\.?", r"\1deg. ", s)
s = re.sub(r"([0-9]+)( *)(volts|volt)\.?", r"\1volt. ", s)
s = re.sub(r"([0-9]+)( *)(watts|watt)\.?", r"\1watt. ", s)
s = re.sub(r"([0-9]+)( *)(amperes|ampere|amps|amp)\.?", r"\1amp. ", s)
s = s.replace(" x "," xby ")
s = s.replace("*"," xby ")
s = s.replace(" by "," xby")
s = s.replace("x0"," xby 0")
s = s.replace("x1"," xby 1")
s = s.replace("x2"," xby 2")
s = s.replace("x3"," xby 3")
s = s.replace("x4"," xby 4")
s = s.replace("x5"," xby 5")
s = s.replace("x6"," xby 6")
s = s.replace("x7"," xby 7")
s = s.replace("x8"," xby 8")
s = s.replace("x9"," xby 9")
s = s.replace("0x","0 xby ")
s = s.replace("1x","1 xby ")
s = s.replace("2x","2 xby ")
s = s.replace("3x","3 xby ")
s = s.replace("4x","4 xby ")
s = s.replace("5x","5 xby ")
s = s.replace("6x","6 xby ")
s = s.replace("7x","7 xby ")
s = s.replace("8x","8 xby ")
s = s.replace("9x","9 xby ")
s = s.replace("whirpool","whirlpool")
s = s.replace("whirlpoolga", "whirlpool")
s = s.replace("whirlpoolstainless","whirlpool stainless")
s = s.replace(" "," ")
# using default stemmer from nlp_utils:
s = (' ').join([stemmer.stem(z) for z in s.split(' ')])
if s == '':
s = u'null'
return s.lower()
except:
return u'null'
if __name__ == '__main__':
######### reading csv files #############
print("Loading data.",end='')
dfTrain = pd.read_csv(pp.train_raw_file,encoding=pp.encoding); prog()
dfTest = pd.read_csv(pp.test_raw_file,encoding=pp.encoding); prog()
dfAttribute = pd.read_csv(pp.attribute_raw_file,encoding=pp.encoding); prog()
dfProdDescription = pd.read_csv(pp.description_raw_file,encoding=pp.encoding); prog()
    dfSynTrain = pd.read_csv(pp.synonyms_train_raw_file,encoding=pp.encoding)
from __future__ import division
import numpy as np
import pandas
import math
import os
import types
import h5py
from six.moves import cPickle as pickle
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("white")
from ML_Tools.Plotting_And_Evaluation.Plotters import *
from ML_Tools.General.Misc_Functions import *
from ML_Tools.General.Ensemble_Functions import ensemblePredict, loadModel
from ML_Tools.General.Batch_Train import getFeature, batchEnsemblePredict
from keras.models import Sequential,model_from_json, load_model
from sklearn.model_selection import StratifiedKFold
dirLoc = "../Data/"
wFactor = 250000/50000
def AMS(s, b):
""" Approximate Median Significance defined as:
AMS = sqrt(
2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s}
)
where b_r = 10, b = background, s = signal, log is natural logarithm """
br = 10.0
radicand = 2 *( (s+b+br) * math.log (1.0 + s/(b+br)) -s)
if radicand < 0:
print('radicand is negative. Exiting')
exit()
else:
return math.sqrt(radicand)
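# Worked example (hypothetical numbers): for s = 10 signal and b = 100 background events,
# AMS = sqrt(2 * ((10 + 100 + 10) * ln(1 + 10/110) - 10)) ≈ 0.94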
def amsScan(inData, scale=False):
best = [0,-1]
ams = []
for index, row in inData.iterrows():
s = scale[0]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 1), 'gen_weight'])
b = scale[1]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 0), 'gen_weight'])
ams.append(AMS(s, b))
if ams[-1] > best[1]:
best = [row['pred_class'], ams[-1]]
print(best)
return ams, best
def foldAMSScan(inData, N=10):
kf = StratifiedKFold(n_splits=N, shuffle=True)
folds = kf.split(inData, inData['gen_target'])
bests = []
for i, (train, test) in enumerate(folds):
bests.append(amsScan(inData.iloc[test], (np.sum(inData[(inData['gen_target'] == 1)]['gen_weight']), np.sum(inData[(inData['gen_target'] == 0)]['gen_weight'])))[1])
print("Fold {}, best AMS {} at cut of {}. Total weights Signal:Bkg. {}:{}".format(i, bests[-1][1], bests[-1][0],
np.sum(inData.iloc[test][inData.gen_target == 1]['gen_weight']),
np.sum(inData.iloc[test][inData.gen_target == 0]['gen_weight'])))
print("Mean cut", np.average([x[0] for x in bests], weights=[1/x[1] for x in bests]), "mean AMS", np.average([x[1] for x in bests], weights=[1/x[1] for x in bests]))
return bests
def amsScanQuick(inData, wFactor=250000./50000.):
s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
tIIs = inData['pred_class'].argsort()
amss = np.empty([len(tIIs)])
amsMax = 0
threshold = 0.0
for tI in range(len(tIIs)):
# don't forget to renormalize the weights to the same sum
# as in the complete training set
amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
if amss[tI] > amsMax:
amsMax = amss[tI]
threshold = inData['pred_class'].values[tIIs[tI]]
#print tI,threshold
if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
else:
b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
print (amsMax, threshold)
return amsMax, threshold
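# Assumed input for the scans above: a DataFrame with the classifier score in 'pred_class',
# the true label in 'gen_target' (1 = signal, 0 = background) and per-event 'gen_weight'.
# Illustrative call (valData is a placeholder name):
#   amsMax, cut = amsScanQuick(valData, wFactor=250000./50000.)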
def scoreTest(ensemble, weights):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
batchEnsemblePredict(ensemble, weights, testData, ensembleSize=10, verbose=1)
def saveTest(cut, name):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
data = | pandas.DataFrame() | pandas.DataFrame |
# %%
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
def get_regions(records, threshold=0.25, min_skips=6):
df_aas = pd.DataFrame( [ list( f.seq ) for f in records] )
df_aas.columns += 1
df_aas["z"] = "-"
# sr_gaprate = df_aas.apply( lambda x:x.value_counts()['-'] , axis=0) / len( df_aas)
sr_gaprate = df_aas.apply( lambda x:x.value_counts() ).fillna(0).loc['-'] / len( df_aas)
targets = sr_gaprate.loc[ sr_gaprate <= threshold ].index.to_frame(name='pos')
targets['diff'] = targets.diff()
if len( targets) == 0:
print('warning : target is not found !')
return | pd.DataFrame({'start':[], 'end':[]}) | pandas.DataFrame |
####
## Title: Precipitable Water Data Extraction Module
## Author: <NAME> / <NAME>
## Documentation Page: https://git.io/fjVHo
####
import os
import csv
import sys
import requests
from numpy import *
import pandas as pd
import time
import datetime
from datetime import date as dte
from datetime import datetime as dt
from metpy.units import units
from siphon.simplewebservice.wyoming import WyomingUpperAir
from mesowest import MesoWest
from rich import print, box
from rich.panel import Panel
from rich.table import Table
from rich.progress import track
from rich.progress import BarColumn, TextColumn, TimeRemainingColumn, Progress, track
progress = Progress(TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
TimeRemainingColumn())
progress.print(Panel(
"[bold deep_sky_blue2]Good Morning\nWelcome to the Data Extraction Module of the Precipitable Water Model. For more information about the model and the purpose of this tool, please visit the [link=https://git.io/fj5Xr]documentation page[/link]"))
## Timeout Retry
REQUESTS_MAX_RETRIES = int(os.getenv("REQUESTS_MAX_RETRIES", 10))
adapter = requests.adapters.HTTPAdapter(max_retries=REQUESTS_MAX_RETRIES)
## Imports Wyoming and MesoWest Site IDs
config = "../../data/import.conf"
cnfg = loadtxt(config, dtype=str, delimiter=":")
## Imports Sensor information
instr = "../../data/instruments.conf"
intr = loadtxt(instr, dtype=str, delimiter=",", unpack=True)[0]
## Data file used for model input
fname = '../../data/master_data.csv'
## Data file used for user input
wname = '../../data/cool_data.csv'
## Stations used
wy_station = cnfg[1][1].split(",")
mw_station = cnfg[0][1].split(",")
## Hours to pull
hour = [00, 12]
## Retrives column index for sensors
headr = pd.read_csv(wname, delimiter=",").columns
indx = [[], []]
for i in range(len(headr)):
if "Sky" in headr[i]:
indx[0].append(i)
elif "Ground" in headr[i] or "Standard" in headr[i]:
indx[1].append(i)
## A function that computes the closest value
def closest(lst, K, d):
lst = asarray(lst)
list = []
tmp2 = dt.combine(d, K)
for i in range(len(lst)):
list.append(abs(dt.combine(d, lst[i]) - tmp2))
idx = asarray(list).argmin()
return lst[idx]
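# Illustrative use of closest(): pick the observation time nearest to a target time on a date.
#   closest([datetime.time(14, 0), datetime.time(14, 10)], datetime.time(14, 7), dte(2019, 1, 1))
# returns datetime.time(14, 10), which is 3 minutes away versus 7 minutes for 14:00.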
## Imports Wyoming Data for specified site and date
def wyoming_import(end_date, station):
try:
df_12 = WyomingUpperAir.request_data(dt.combine(end_date, datetime.time(12, 0)), station)
pw12 = df_12.pw[0]
except (ValueError, IndexError):
pw12 = "NaN"
except requests.exceptions.HTTPError:
pw12 = "Error"
try:
df_00 = WyomingUpperAir.request_data(end_date + datetime.timedelta(days=1), station)
pw00 = df_00.pw[0]
except (ValueError, IndexError):
pw00 = "NaN"
except requests.exceptions.HTTPError:
pw00 = "Error"
return [station, [end_date, pw12, pw00]]
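# Return shape consumed downstream: [station_id, [date, PW at 12Z, PW at 00Z of the next day]],
# with "NaN"/"Error" strings standing in for missing or failed soundings (inferred from the
# fallbacks above).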
def mesowest_import(end_date, station, in_time):
df_mw = MesoWest.request_data(end_date + datetime.timedelta(days=1), station.strip(" "))
mw_header = df_mw.columns
for i in range(len(mw_header)):
if "time(" in mw_header[i]:
tau = i
else:
continue
if (str(in_time) in ['00:00:00', 'NaT']) or (str(df_mw[mw_header[tau]][0]) == 'NaT'):
rh = "NaN"
temp = "NaN"
thyme = "NaT"
else:
df_tm = df_mw.loc[(df_mw[mw_header[tau]] == closest(df_mw[mw_header[tau]], in_time, end_date))]
thyme = df_tm[mw_header[tau]].values[0]
rh = df_tm['relative_humidity'].values[0]
temp = round((float(df_tm['temperature'].values[0]) * units.degF).to(units.degC).magnitude, 2)
if str(rh) == "nan":
rh = "NaN"
if str(temp) == "nan":
temp = "NaN"
return [thyme, rh, temp]
def impt(end_date, idx):
cool_data = []
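    # Note: filew/readw are assumed to be an open file handle and csv.reader over wname
    # (the user-input file declared above); they are presumably defined elsewhere in the
    # full script and are not shown here.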
with filew as csvfile:
next(csv.reader(csvfile, delimiter=","))
for row in readw:
sky = [[] for _ in range(len(indx[0]))]
gro = [[] for _ in range(len(indx[1]))]
mtime = row[1].split('/')
condition = row[2].split('/')
for j in range(len(indx[0])):
sky[j] = row[indx[0][j]].split('/')
for k in range(len(indx[1])):
gro[k] = row[indx[1][k]].split('/')
comments = row[-1].split('/')
cool_data.append([mtime, condition, sky, gro, comments])
i = 0
wy_data = []
for j in wy_station:
i = 0
wy_out = wyoming_import(end_date, j.strip(" "))
while "Error" in wy_out[1]:
progress.console.log("[bold yellow] (001) Wyoming Sever Disconnected")
time.sleep(100)
wy_out = wyoming_import(end_date, j.strip(" "))
            i += 1
wy_data.append(wy_out)
neat = []
for i in range(idx, idx + 1):
neat.append(cool_data[i])
neat = neat[::-1]
mw_data = []
for j in mw_station:
mw_data.append(mesowest_import(end_date, j, | pd.to_datetime(neat[0][0][0]) | pandas.to_datetime |
import unittest
import pytest
import pandas as pd
from analitico.schema import generate_schema, apply_schema
from .test_mixin import TestMixin
# pylint: disable=no-member
@pytest.mark.django_db
class DatasetTests(unittest.TestCase, TestMixin):
""" Unit testing of Dataset functionality, reading, converting, transforms, saving, etc """
## Test creations
def test_dataset_csv1_basics(self):
""" Test empty dataset creation """
try:
ds = self.read_dataset_asset("ds_test_1.json")
self.assertEqual(ds.id, "ds_1")
df = ds.get_dataframe()
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertEqual(len(df), 3)
self.assertEqual(df.columns[0], "First")
self.assertEqual(df.columns[1], "Second")
self.assertEqual(df.columns[2], "Third")
self.assertEqual(df.iloc[0, 0], 10)
self.assertEqual(df.iloc[1, 1], 21)
self.assertEqual(df.iloc[2, 2], 32)
except Exception as exc:
raise exc
def test_dataset_csv2_types_default(self):
""" Test standard data type conversions """
try:
ds = self.read_dataset_asset("ds_test_2.json")
self.assertEqual(ds.id, "ds_2")
df = ds.get_dataframe()
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
self.assertEqual(df.dtypes[2], "float64")
except Exception as exc:
raise exc
def test_dataset_csv3_types_cast_float(self):
""" Test forcing integers to be floating point instead """
try:
df = self.read_dataframe_asset("ds_test_3_cast_float.json")
# would normally be int, but was forced to float
self.assertEqual(df.dtypes[0], "float64")
self.assertEqual(df.dtypes[1], "O")
self.assertEqual(df.dtypes[2], "float64")
except Exception as exc:
raise exc
def test_dataset_csv3_types_cast_string(self):
""" Test forcing float column to string """
try:
df = self.read_dataframe_asset("ds_test_3_cast_string.json")
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
# third column would be float, but is cast to string
self.assertEqual(df.dtypes[2], "O")
self.assertEqual(df.iloc[2, 2], "32.50")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_rename(self):
""" Test reading a table then renaming a column """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.columns[1], "Second")
schema["columns"][1]["rename"] = "Secondo"
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.columns[1], "Secondo")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_index(self):
""" Test reading a table then making a column its index """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.index.name, None)
schema["columns"][0]["index"] = True
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.index.name, "First")
except Exception as exc:
raise exc
def test_dataset_csv4_applyschema_index_rename(self):
""" Test reading a table then making a column its index then renaming it """
try:
df = self.read_dataframe_asset("ds_test_4.json")
schema = generate_schema(df)
columns = schema["columns"]
self.assertEqual(len(columns), 3)
self.assertEqual(df.index.name, None)
schema["columns"][0]["index"] = True
schema["columns"][0]["rename"] = "Primo"
df = apply_schema(df, schema)
columns = df.columns
self.assertEqual(df.index.name, "Primo")
self.assertEqual(df.columns[0], "Primo")
except Exception as exc:
raise exc
def test_dataset_csv4_types_datetime_iso8601(self):
""" Test reading datetime in ISO8601 format """
try:
df = self.read_dataframe_asset("ds_test_4.json")
self.assertEqual(df.dtypes[0], "int64")
self.assertEqual(df.dtypes[1], "O")
self.assertTrue(isinstance(df.iloc[0, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[1, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[2, 2], pd.Timestamp))
self.assertTrue(isinstance(df.iloc[3, 2], pd.Timestamp))
self.assertEqual(df.iloc[0, 2], pd.Timestamp("2019-01-20 00:00:00"))
self.assertEqual(df.iloc[1, 2], pd.Timestamp("2019-01-20 16:30:15"))
self.assertEqual(df.iloc[2, 2], pd.Timestamp("2019-02-01 00:00:00"))
self.assertEqual(df.iloc[3, 2], pd.Timestamp("2019-01-01 00:00:00"))
# Timezones are state machines from row to row...
# 2019-09-15T15:53:00
self.assertEqual(df.iloc[4, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00+05:00 (changes timezone)
self.assertEqual(df.iloc[5, 2], pd.Timestamp("2019-09-15 10:53:00"))
# 2019-09-15T15:53:00 (maintains +5 timezone)
self.assertEqual(df.iloc[6, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00+00 (reverts timezone)
self.assertEqual(df.iloc[7, 2], pd.Timestamp("2019-09-15 15:53:00"))
# 2019-09-15T15:53:00-01:30 (changes timezone)
self.assertEqual(df.iloc[8, 2], pd.Timestamp("2019-09-15 17:23:00"))
# 20080915T155300Z (zulu time)
self.assertEqual(df.iloc[9, 2], pd.Timestamp("2008-09-15 15:53:00"))
# Time only uses today's date: 15:53:00.322348
self.assertEqual(df.iloc[10, 2], pd.Timestamp("15:53:00.322348"))
# Examples:
# http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a003169814.htm
except Exception as exc:
raise exc
def test_dataset_csv5_category_no_schema(self):
""" Test reading categorical data without a schema """
try:
df = self.read_dataframe_asset("ds_test_5_category_no_schema.json")
self.assertEqual(len(df.columns), 10)
self.assertEqual(df.columns[0], "id")
self.assertEqual(df.columns[1], "name")
self.assertEqual(df.columns[2], "slug")
self.assertEqual(df.columns[3], "parent_id")
self.assertEqual(df.columns[4], "depth")
self.assertEqual(df.columns[5], "priority")
self.assertEqual(df.columns[6], "max_weight")
self.assertEqual(df.columns[7], "frozen")
self.assertEqual(df.columns[8], "rate")
self.assertEqual(df.columns[9], "has_ingredients_book")
# Column types
self.assertEqual(df.dtypes[0], "int") # id
self.assertEqual(df.dtypes[1], "O") # name
self.assertEqual(df.dtypes[2], "O") # slug
self.assertEqual(df.dtypes[3], "float") # parent_id
self.assertEqual(df.dtypes[7], "int") # frozen
# Items types
self.assertEqual(type(df.iloc[0, 1]).__name__, "str") # name
self.assertEqual(type(df.iloc[0, 2]).__name__, "str") # slug
self.assertEqual(type(df.iloc[0, 3]).__name__, "float64") # parent_id
except Exception as exc:
raise exc
def test_dataset_csv5_category_with_schema(self):
""" Test reading categorical data with a schema, check types """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
self.assertEqual(len(df.columns), 10)
self.assertEqual(df.columns[0], "id")
self.assertEqual(df.columns[1], "name")
self.assertEqual(df.columns[2], "slug")
self.assertEqual(df.columns[3], "parent_id")
self.assertEqual(df.columns[4], "depth")
self.assertEqual(df.columns[5], "priority")
self.assertEqual(df.columns[6], "max_weight")
self.assertEqual(df.columns[7], "frozen")
self.assertEqual(df.columns[8], "rate")
self.assertEqual(df.columns[9], "has_ingredients_book")
# Column types
self.assertEqual(df.dtypes[0], "int") # id
self.assertEqual(df.dtypes[1], "category") # name
self.assertEqual(df.dtypes[2], "category") # slug
self.assertEqual(df.dtypes[3], "float") # parent_id
self.assertEqual(df.dtypes[7], "bool") # frozen
except Exception as exc:
raise exc
def test_dataset_csv5_category_check_values(self):
""" Test reading categorical data, check values """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
# Items types
self.assertEqual(type(df.iloc[0, 1]).__name__, "str") # name
self.assertEqual(type(df.iloc[0, 2]).__name__, "str") # slug
self.assertEqual(type(df.iloc[0, 3]).__name__, "float64") # parent_id
self.assertEqual(type(df.iloc[0, 7]).__name__, "bool_") # frozen
except Exception as exc:
raise exc
def test_dataset_csv5_category_no_index(self):
""" Test reading categorical data, check index column """
try:
df1 = self.read_dataframe_asset("ds_test_5_category_with_schema.json")
# By default the index column is the row number.
# If the dataset has an index or id row it is just like
# any other row and is not used to index the pandas dataset
self.assertFalse(df1.loc[205, "frozen"])
self.assertEqual(df1.loc[205, "slug"], "sughi-pronti-primi-piatti")
self.assertEqual(df1.loc[205, "parent_id"], 100150)
# Apply the correct index column manually
df2 = df1.set_index("id", drop=False)
self.assertFalse(df2.loc[205, "frozen"])
self.assertEqual(df2.loc[205, "slug"], "carne-tacchino")
self.assertEqual(df2.loc[205, "parent_id"], 100102)
except Exception as exc:
raise exc
def test_dataset_csv5_category_with_index(self):
""" Test reading categorical data, check explicit index column """
try:
df = self.read_dataframe_asset("ds_test_5_category_with_index.json")
self.assertFalse(df.loc[205, "frozen"])
self.assertEqual(df.loc[205, "slug"], "carne-tacchino")
self.assertEqual(df.loc[205, "parent_id"], 100102)
except Exception as exc:
raise exc
def test_dataset_csv6_weird_index_no_attr(self):
""" Test reading table with 'weird' index column explicitly marked in schema """
try:
df = self.read_dataframe_asset("ds_test_6_weird_index_no_attr.json")
self.assertEqual(df.loc[8, "slug"], "pasta-riso-cereali")
self.assertEqual(df.loc[27, "slug"], "2-alt-pasta")
except Exception as exc:
raise exc
def test_dataset_csv6_weird_index_with_attr(self):
""" Test reading table with 'weird' index column explicitly marked in schema """
try:
df = self.read_dataframe_asset("ds_test_6_weird_index_with_attr.json")
self.assertEqual(df.index.name, "indice")
self.assertEqual(df.loc[8, "slug"], "pane-pasticceria")
self.assertEqual(df.loc[27, "slug"], "sughi-scatolame-condimenti")
self.assertEqual(df.loc[100598, "slug"], "2-alt-salumi")
except Exception as exc:
raise exc
def test_dataset_csv7_timedelta(self):
""" Test timespan to timedelta automatic conversion """
try:
df = self.read_dataframe_asset("ds_test_7_autoschema.json")
# index is from column 'indice'
self.assertEqual(df.loc[1, "elapsed"], pd.Timedelta("1 day"))
self.assertEqual(df.loc[3, "elapsed"], pd.Timedelta("2 days"))
self.assertEqual(df.loc[4, "elapsed"], | pd.Timedelta("3 days") | pandas.Timedelta |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
| tm.assert_categorical_equal(result, expected) | pandas.util.testing.assert_categorical_equal |
#!/usr/bin/env python
# coding: utf-8
# In[133]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.model.forecast import SampleForecast
# In[134]:
df=pd.read_csv('data_share.csv', sep=',', index_col = 0, parse_dates=True)
# In[135]:
df['y'] = | pd.to_numeric(df["y"], downcast="float") | pandas.to_numeric |
from __future__ import print_function
from .barcode import BarcodeSeqLib
from .barcodevariant import BcvSeqLib
from .barcodeid import BcidSeqLib
from .basic import BasicSeqLib
from .idonly import IdOnlySeqLib
from .overlap import OverlapSeqLib
from .config_check import seqlib_type
from .storemanager import StoreManager
import os
import pandas as pd
import numpy as np
import logging
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import scipy.stats as stats
from .sfmap import sfmap_plot
from .plots import (
fit_axes,
fit_axes_text,
volcano_plot,
configure_axes,
plot_colors,
weights_plot,
)
from .constants import WILD_TYPE_VARIANT, SYNONYMOUS_VARIANT
from .variant import protein_variant
from .dataframe import singleton_dataframe
#: map class names to class definitions to avoid use of globals()
SEQLIB_CLASSES = {
"BarcodeSeqLib": BarcodeSeqLib,
"BcvSeqLib": BcvSeqLib,
"BcidSeqLib": BcidSeqLib,
"BasicSeqLib": BasicSeqLib,
"IdOnlySeqLib": IdOnlySeqLib,
"OverlapSeqLib": OverlapSeqLib,
}
def regression_apply(row, timepoints, weighted):
"""
:py:meth:`pandas.DataFrame.apply` apply function for calculating
enrichment using linear regression. If *weighted* is ``True`` perform
weighted least squares; else perform ordinary least squares.
Weights for weighted least squares are included in *row*.
Returns a :py:class:`pandas.Series` containing regression coefficients,
residuals, and statistics.
"""
# retrieve log ratios from the row
y = row[["L_{}".format(t) for t in timepoints]]
# re-scale the x's to fall within [0, 1]
xvalues = [x / float(max(timepoints)) for x in timepoints]
# perform the fit
X = sm.add_constant(xvalues) # fit intercept
if weighted:
W = row[["W_{}".format(t) for t in timepoints]]
fit = sm.WLS(y, X, weights=W).fit()
else:
fit = sm.OLS(y, X).fit()
# re-format as a data frame row
values = np.concatenate(
[fit.params, [fit.bse["x1"], fit.tvalues["x1"], fit.pvalues["x1"]], fit.resid]
)
index = ["intercept", "slope", "SE_slope", "t", "pvalue_raw"] + [
"e_{}".format(t) for t in timepoints
]
return pd.Series(data=values, index=index)
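# Illustrative call (ratios is a hypothetical frame holding the L_<t> log-ratio and,
# for WLS, W_<t> weight columns described in the docstring):
#   scores = ratios.apply(regression_apply, axis="columns", args=[timepoints, True])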
class Selection(StoreManager):
"""
Class for a single selection replicate, consisting of multiple
timepoints. This class coordinates :py:class:`~seqlib.seqlib.SeqLib`
objects.
"""
store_suffix = "sel"
treeview_class_name = "Selection"
def __init__(self):
StoreManager.__init__(self)
self.libraries = dict()
self.barcode_maps = dict()
self._wt = None
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
def _children(self):
"""
Return the :py:class:`~seqlib.seqlib.SeqLib` objects as a list,
sorted by timepoint and then by name.
"""
libs = list()
for tp in self.timepoints:
libs.extend(sorted(self.libraries[tp], key=lambda x: x.name))
return libs
def remove_child_id(self, tree_id):
"""
Remove the reference to a :py:class:`~seqlib.seqlib.SeqLib` with
Treeview id *tree_id*. Deletes empty time points.
"""
empty = None
for tp in self.libraries:
tp_ids = [lib.treeview_id for lib in self.libraries[tp]]
if tree_id in tp_ids:
del self.libraries[tp][tp_ids.index(tree_id)]
if len(self.libraries[tp]) == 0:
empty = tp
break # found the id, stop looking
if empty is not None:
del self.libraries[empty]
@property
def timepoints(self):
return sorted(self.libraries.keys())
@property
def wt(self):
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.children[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Selection should not contain wild type sequence [{}]".format(
self.name
)
)
else:
return None
def configure(self, cfg, configure_children=True):
"""
Set up the :py:class:`~selection.Selection` using the *cfg* object,
usually from a ``.json`` configuration file.
If *configure_children* is false, do not configure the children in
*cfg*.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
if configure_children:
if "libraries" not in cfg:
raise KeyError(
"Missing required config value {} [{}]".format(
"libraries", self.name
)
)
for lib_cfg in cfg["libraries"]:
libtype = seqlib_type(lib_cfg)
if libtype is None:
raise ValueError("Unrecognized SeqLib config")
elif libtype in ("BcvSeqLib", "BcidSeqLib"):
lib = SEQLIB_CLASSES[libtype]()
# don't re-parse the barcode maps if possible
mapfile = lib_cfg["barcodes"]["map file"]
if mapfile in self.barcode_maps.keys():
lib.configure(lib_cfg, barcode_map=self.barcode_maps[mapfile])
else:
lib.configure(lib_cfg)
self.barcode_maps[mapfile] = lib.barcode_map
self.add_child(lib)
else:
# requires that the SeqLib derived classes be imported into the
# module namespace using "from x import y" style
lib = SEQLIB_CLASSES[libtype]()
lib.configure(lib_cfg)
self.add_child(lib)
def validate(self):
"""
Raises an informative ``ValueError`` if the time points in the analysis are not suitable.
Calls validate method on all child SeqLibs.
"""
# check the time points
if 0 not in self.timepoints:
raise ValueError("Missing timepoint 0 [{}]".format(self.name))
if self.timepoints[0] != 0:
raise ValueError("Invalid negative timepoint [{}]".format(self.name))
if len(self.timepoints) < 2:
raise ValueError("Multiple timepoints required [{}]".format(self.name))
elif len(self.timepoints) < 3 and self.scoring_method in ("WLS", "OLS"):
raise ValueError(
"Insufficient number of timepoints for regression scoring [{}]".format(
self.name
)
)
# check the wild type sequences
if self.has_wt_sequence():
for child in self.children[1:]:
if self.children[0].wt != child.wt:
self.logger.warning("Inconsistent wild type sequences")
break
# check that we're not doing wild type normalization on something with no wild type
# if not self.has_wt_sequence() and self.logr_method == "wt":
# raise ValueError("No wild type sequence for wild type normalization [{}]".format(self.name))
# validate children
for child in self.children:
child.validate()
def serialize(self):
"""
Format this object (and its children) as a config object suitable for dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["libraries"] = [child.serialize() for child in self.children]
return cfg
def add_child(self, child):
if child.name in self.child_names():
raise ValueError(
"Non-unique sequencing library name '{}' [{}]".format(
child.name, self.name
)
)
child.parent = self
# add it to the libraries dictionary
try:
self.libraries[child.timepoint].append(child)
except KeyError:
self.libraries[child.timepoint] = [child]
def is_barcodevariant(self):
"""
Return ``True`` if all :py:class:`~seqlib.seqlib.SeqLib` in the
:py:class:`~selection.Selection` are
:py:class:`~barcodevariant.BcvSeqLib` objects with
the same barcode map, else ``False``.
"""
return (
all(isinstance(lib, BcvSeqLib) for lib in self.children)
and len(self.barcode_maps.keys()) == 1
)
def is_barcodeid(self):
"""
Return ``True`` if all :py:class:`~seqlib.SeqLib` in the
:py:class:`~selection.Selection` are
:py:class:`~barcodeid.BcidSeqLib` objects with
the same barcode map, else ``False``.
"""
return (
all(isinstance(lib, BcidSeqLib) for lib in self.children)
and len(self.barcode_maps.keys()) == 1
)
def is_coding(self):
"""
Return ``True`` if the all :py:class:`~seqlib.seqlib.SeqLib` in the
:py:class:`~selection.Selection` count protein-coding variants, else
``False``.
"""
return all(x.is_coding() for x in self.children)
def has_wt_sequence(self):
"""
Return ``True`` if the all :py:class:`~seqlib.seqlib.SeqLib` in the
:py:class:`~selection.Selection` have a wild type sequence, else
``False``.
"""
return all(x.has_wt_sequence() for x in self.children)
def merge_counts_unfiltered(self, label):
"""
Counts :py:class:`~seqlib.seqlib.SeqLib` objects and tabulates counts
for each timepoint. :py:class:`~seqlib.seqlib.SeqLib` objects from
the same timepoint are combined by summing the counts.
Stores the unfiltered counts under ``/main/label/counts_unfiltered``.
"""
if self.check_store("/main/{}/counts_unfiltered".format(label)):
return
# calculate counts for each SeqLib
self.logger.info("Counting for each time point ({})".format(label))
for lib in self.children:
lib.calculate()
# combine all libraries for a given timepoint
self.logger.info("Aggregating SeqLib data")
destination = "/main/{}/counts_unfiltered".format(label)
if destination in self.store.keys():
# need to remove the current destination table because we are using append
# append is required because it takes the "min_itemsize" argument, and put doesn't
self.logger.info("Replacing existing '{}'".format(destination))
self.store.remove(destination)
# seqlib count table name for this element type
lib_table = "/main/{}/counts".format(label)
# create an index of all elements in the analysis
complete_index = pd.Index([])
for tp in self.timepoints:
for lib in self.libraries[tp]:
complete_index = complete_index.union(
pd.Index(lib.store.select_column(lib_table, "index"))
)
self.logger.info(
"Created shared index for count data ({} {})".format(
len(complete_index), label
)
)
# min_itemsize value
max_index_length = complete_index.map(len).max()
# perform operation in chunks
tp_frame = None
for i in xrange(0, len(complete_index), self.chunksize):
# don't duplicate the index if the chunksize is large
if self.chunksize < len(complete_index):
index_chunk = complete_index[i : i + self.chunksize]
else:
index_chunk = complete_index
self.logger.info(
"Merging counts for chunk {} ({} rows)".format(
i / self.chunksize + 1, len(index_chunk)
)
)
for tp in self.timepoints:
c = self.libraries[tp][0].store.select(lib_table, "index = index_chunk")
for lib in self.libraries[tp][1:]:
c = c.add(
lib.store.select(lib_table, "index = index_chunk"), fill_value=0
)
c.columns = ["c_{}".format(tp)]
if tp == 0:
tp_frame = c
else:
tp_frame = tp_frame.join(c, how="outer")
# save the unfiltered counts
if "/main/{}/counts_unfiltered".format(label) not in self.store:
self.store.append(
"/main/{}/counts_unfiltered".format(label),
tp_frame.astype(float),
min_itemsize={"index": max_index_length},
data_columns=list(tp_frame.columns),
)
else:
self.store.append(
"/main/{}/counts_unfiltered".format(label), tp_frame.astype(float)
)
def filter_counts(self, label):
"""
Converts unfiltered counts stored in ``/main/label/counts_unfiltered``
into filtered counts calculated from complete cases (elements with a
non-zero count in each time point).
For the most basic element type (variant or barcode, depending on the
experimental design), the result of this operation simply drops any
rows that have missing counts. For other element types, such as
synonymous variants, the counts are re-aggregated using only the
complete cases in the underlying element type.
"""
if (self.is_barcodeid() or self.is_barcodevariant()) and label != "barcodes":
# calculate proper combined counts
# df = self.store.select("/main/barcodes/counts") # this should exist because of the order of label calculations
# redo the barcode->variant/id mapping with the filtered counts
# NOT YET IMPLEMENTED
df = self.store.select(
"/main/{}/counts_unfiltered".format(label)
) # just do this for now
else:
df = self.store.select("/main/{}/counts_unfiltered".format(label))
df.dropna(axis="index", how="any", inplace=True)
self.store.put(
"/main/{}/counts".format(label),
df.astype(float),
format="table",
data_columns=df.columns,
)
def combine_barcode_maps(self):
if self.check_store("/main/barcodemap"):
return
bcm = None
for lib in self.children:
if bcm is None:
bcm = lib.store["/raw/barcodemap"]
else:
bcm = bcm.join(
lib.store["/raw/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)["value"]]
bcm.loc[new.index, "value"] = new["value.drop"]
bcm.drop("value.drop", axis="columns", inplace=True)
bcm.sort_values("value", inplace=True)
self.store.put(
"/main/barcodemap", bcm, format="table", data_columns=bcm.columns
)
def calculate(self):
"""
Wrapper method to calculate counts and enrichment scores
for all data in the :py:class:`~selection.Selection`.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all sequencing libraries [{}]".format(self.name)
)
for label in self.labels:
self.merge_counts_unfiltered(label)
self.filter_counts(label)
if self.is_barcodevariant() or self.is_barcodeid():
self.combine_barcode_maps()
if self.scoring_method == "counts":
pass
elif self.scoring_method == "ratios":
for label in self.labels:
self.calc_ratios(label)
elif self.scoring_method == "simple":
for label in self.labels:
self.calc_simple_ratios(label)
elif self.scoring_method in ("WLS", "OLS"):
if len(self.timepoints) <= 2:
raise ValueError(
"Regression-based scoring requires three or more time points."
)
for label in self.labels:
self.calc_log_ratios(label)
if self.scoring_method == "WLS":
self.calc_weights(label)
self.calc_regression(label)
else:
raise ValueError(
'Invalid scoring method "{}" [{}]'.format(
self.scoring_method, self.name
)
)
if self.scoring_method in ("ratios", "WLS", "OLS") and self.component_outliers:
if self.is_barcodevariant() or self.is_barcodeid():
self.calc_outliers("barcodes")
if self.is_coding():
self.calc_outliers("variants")
def calc_simple_ratios(self, label):
"""
        Calculate simplified (original Enrich) ratio scores. This method does not produce standard errors.
"""
if self.check_store("/main/{}/scores".format(label)):
return
self.logger.info("Calculating simple ratios ({})".format(label))
c_last = "c_{}".format(self.timepoints[-1])
df = self.store.select(
"/main/{}/counts".format(label), "columns in ['c_0', c_last]"
)
# perform operations on the numpy values of the data frame for easier broadcasting
ratios = (df[c_last].values.astype("float") / df[c_last].sum(axis="index")) / (
df["c_0"].values.astype("float") / df["c_0"].sum(axis="index")
)
# make it a data frame again
ratios = | pd.DataFrame(data=ratios, index=df.index, columns=["ratio"]) | pandas.DataFrame |
from hydroDL import kPath, utils
from hydroDL.data import ntn
import pandas as pd
import numpy as np
import os
import time
import importlib
# save ntn data to csv files
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
tabData = ntn.readDataRaw()
ntnIdLst = tabData['siteID'].unique().tolist()
varLst = ntn.varLst
td = pd.date_range(start='1979-01-01', end='2019-12-31')
tw = pd.date_range(start='1979-01-01', end='2019-12-31', freq='W-TUE')
ntnFolderD = os.path.join(dirNTN, 'csv', 'daily')
ntnFolderW = os.path.join(dirNTN, 'csv', 'weekly')
tt0 = time.time()
for kk, ntnId in enumerate(ntnIdLst):
tt1 = time.time()
tab = tabData[tabData['siteID'] == ntnId]
dfD = pd.DataFrame(index=td, columns=varLst, dtype=np.float32)
dfW = pd.DataFrame(index=tw, columns=varLst, dtype=np.float32)
for k in range(len(tab)):
t1 = pd.to_datetime(tab.iloc[k]['dateon']).date()
t2 = | pd.to_datetime(tab.iloc[k]['dateoff']) | pandas.to_datetime |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import pandas as pd
import sqlite3
from dash.dependencies import Input, Output, State
import time
# import datetime
from datetime import datetime
from pandas import Series
from scipy import stats
from scipy.stats import norm
from numpy import arange,array,ones
import dash_table
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.config['suppress_callback_exceptions']=True
current_year = datetime.now().year
current_day = datetime.now().day
today = time.strftime("%Y-%m-%d")
dayofyear = time.strftime("%j")
dayofyear = int(dayofyear)
# daily normal temperatures
df_norms_max = pd.read_csv('./daily_normal_max.csv')
df_norms_min = pd.read_csv('./daily_normal_min.csv')
df_norms_max_ly = pd.read_csv('./daily_normal_max_ly.csv')
df_norms_min_ly = | pd.read_csv('./daily_normal_min_ly.csv') | pandas.read_csv |
'''
DERIVED FROM:https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/sklearn/README.rst
Preparing the Scikit-learn training script
Your Scikit-learn training script must be a Python 2.7 or 3.5 compatible source file.
The training script is very similar to a training script you might run outside of SageMaker,
but you can access useful properties about the training environment through various environment variables,
such as
- SM_MODEL_DIR:
A string representing the path to the directory to write model artifacts to.
These artifacts are uploaded to S3 for model hosting.
- SM_OUTPUT_DATA_DIR:
A string representing the filesystem path to write output artifacts to.
Output artifacts may include checkpoints, graphs, and other files to save,
not including model artifacts. These artifacts are compressed and uploaded
to S3 to the same S3 prefix as the model artifacts.
Supposing two input channels, 'train' and 'test',
were used in the call to the Scikit-learn estimator's fit() method,
the following will be set, following the format "SM_CHANNEL_[channel_name]":
- SM_CHANNEL_TRAIN:
A string representing the path to the directory containing data in the 'train' channel
- SM_CHANNEL_TEST:
Same as above, but for the 'test' channel.
A typical training script loads data from the input channels,
configures training with hyperparameters, trains a model,
and saves a model to model_dir so that it can be hosted later.
Hyperparameters are passed to your script as arguments and can
be retrieved with an argparse.ArgumentParser instance.
For example, a training script might start with the following:
Because the SageMaker imports your training script,
you should put your training code in a main guard (if __name__=='__main__':)
if you are using the same script to host your model,
so that SageMaker does not inadvertently run your training code at the wrong point in execution.
For more on training environment variables, please visit https://github.com/aws/sagemaker-containers.
'''
import argparse
import pandas as pd
import os
# GradientBoosting Regressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
# Pipeline and StandardScaler
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Hyperparameters are described here. In this simple example we are just including one hyperparameter.
parser.add_argument('--learning_rate', type=float, default=0.1)
parser.add_argument('--n_estimators', type=int, default=100)
# Sagemaker specific arguments. Defaults are set in the environment variables.
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
args = parser.parse_args()
# Take the set of files and read them all into a single pandas dataframe
input_files = [os.path.join(args.train, file) for file in os.listdir(args.train) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(args.train, "train"))
raw_data = [ | pd.read_csv(file, header=None, engine="python") | pandas.read_csv |
#!/usr/bin/env python
"""
make a summary of the project
wrapper output of goldclip pipeline
create stats and plots
## figure1.reads_mapping_stat.pdf
number of reads: raw, clean, no_dup, spikein, genome, unmap, ...
## figure2.reads_annotation_stat.pdf
number of reads: RNA categories, ...
## figure3.reads_correlation.pdf
## figure4.rtstop_correlation.pdf
## figure5.peak_number.pdf
## figure6.peak_length.pdf
## figure7.peak_annotation.pdf
## figure8.peak_motif.pdf
## figure9.peak_conservation.pdf
## figure10.hexmer_zscore.pdf
## figure11.peak_overlap.pdf
functions
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "2018-12-25"
__version__ = "0.1"
import os
import re
import random
import logging
import pandas as pd
# import numpy as np
from goldclip.helper import *
from goldclip.bin.bed_fixer import Bed_parser
from goldclip.bin.bed_motif import bed2motif
from goldclip.bin.bed_annotation import bed_annotator
from goldclip.goldcliplib.log_parser import *
from goldclip.goldcliplib.log_parser import Json_file
class Goldclip_report(object):
"""Record the output of Goldclip output
directory structure of goldclip output
01.trimming
02.genome_mapping
03.call_peaks
04.call_rtstops
05.report
"""
def __init__(self, project_path, project_name, genome, group='homer',
window=10000, threads=8, **kwargs):
self.project_path = project_path
self.project_name = project_name
self.genome = genome
# self.group = group
self.window = window # bam correlation
self.threads = threads
## directory structure
path_keys = [
'trim_path',
'align_path',
'peak_path',
'rtstop_path',
'report_path']
path_values = [
'01.trimming',
'02.genome_mapping',
'03.call_peaks',
'04.call_rtstops',
'05.report']
path_values = [os.path.join(self.project_path, i) for i in path_values]
self.project_subpath = dict(zip(path_keys, path_values))
def get_trim_stat(self):
"""Return the reads processing of fastq files
groups: raw, too_short, PCR_dup, no_dup
"""
path_trim = self.project_subpath['trim_path']
dfx = [] # list of pd.DataFrame
if os.path.exists(path_trim):
with os.scandir(path_trim) as it:
for entry in it:
if not entry.name.endswith('.cutadapt.json'):
continue
name = re.sub(r'.cutadapt.json', '', entry.name)
fn = os.path.join(path_trim, entry.name)
dd = Json_file(fn).json_reader()
# clean
fn_nodup = os.path.join(path_trim, name + '.clean_reads.txt')
with open(fn_nodup) as fi:
dd['nodup'] = next(fi).strip()
dd['too_short'] = int(dd['total']) - int(dd['clean'])
dd['dup'] = int(dd['clean']) - int(dd['nodup'])
dx = pd.DataFrame({'group': ['raw_count',
'too_short',
'PCR_dup',
'no_dup'],
name: [dd['total'],
dd['too_short'],
dd['dup'],
dd['nodup']]})
dx.set_index('group', inplace=True)
dfx.append(dx)
df = pd.DataFrame()
if len(dfx) > 0:
df = pd.concat(dfx, axis=1)
df = df.apply(pd.to_numeric)
df.insert(0, self.project_name, df.sum(axis=1))
return df
def get_map_stat(self):
"""Return the reads mapping
categories
"""
path_map = self.project_subpath['align_path']
dfx = []
with os.scandir(path_map) as it:
for entry in it:
if not entry.name.endswith('.mapping_stat.csv'):
continue
name = re.sub(r'.mapping_stat.csv', '', entry.name)
fn = os.path.join(path_map, entry.name)
# skip merged
if name == self.project_name:
continue
dx1 = pd.read_csv(fn, '\t')
dx1 = dx1.rename(index=str, columns={'Unnamed: 0': 'name'})
dx2 = pd.melt(dx1, id_vars=['name'], var_name='group', value_name='count')
dx3 = dx2[['group', 'count']].set_index('group').rename(index=str, columns={'count': name})
dfx.append(dx3)
df = pd.concat(dfx, axis=1)
df.insert(0, self.project_name, df.sum(axis=1))
return df
def get_bam_file(self, bam2bed=False, rep_only=False, merge_only=False):
path_map = self.project_subpath['align_path']
bam_files = []
with os.scandir(path_map) as it:
for entry in it:
if merge_only and not entry.name == self.project_name:
continue
elif rep_only and entry.name == self.project_name:
continue
else:
pass
bam = os.path.join(path_map, entry.name, entry.name + '.bam')
bed = os.path.join(path_map, entry.name, entry.name + '.bed')
if not os.path.exists(bam):
continue
if bam2bed:
if not os.path.exists(bed):
BAM(bam).to_bed()
bam_files.append(bed)
else:
bam_files.append(bam)
return bam_files
def get_peak_file(self, rep_only=False, merge_only=False):
path_peak = self.project_subpath['peak_path']
peak_files = []
with os.scandir(path_peak) as tools:
for tool in tools:
path_tool = os.path.join(path_peak, tool.name)
with os.scandir(path_tool) as smps:
for smp in smps:
if merge_only and not smp.name == self.project_name:
continue
elif rep_only and smp.name == self.project_name:
continue
else:
pass
fn = os.path.join(path_tool, smp.name,
smp.name + '.fixed.bed')
if os.path.exists(fn):
peak_files.append(fn)
return peak_files
def get_rtstop_file(self, rep_only=False, merge_only=False, rt_reads=False):
path_rtstop = self.project_subpath['rtstop_path']
rtstop_files = []
with os.scandir(path_rtstop) as it:
for entry in it:
if merge_only and not entry.name == self.project_name:
continue
elif rep_only and entry.name == self.project_name:
continue
else:
pass
fn_ext = '.RTRead.bed' if rt_reads else '.RTStop.bed'
fn = os.path.join(path_rtstop, entry.name, entry.name + fn_ext)
if os.path.exists(fn):
rtstop_files.append(fn)
return rtstop_files
##---------------------------------##
## statistics for figures
##---------------------------------##
def figure1_trim_map(self):
"""Trim, PCR_dup, mapped reads
"""
logging.info('figure1 reads mapping')
path_report = self.project_subpath['report_path']
project_name = self.project_name
figure1_path = os.path.join(path_report, 'read_mapping')
figure1_txt = os.path.join(figure1_path, 'read_mapping.txt')
assert is_path(figure1_path)
df1 = self.get_trim_stat()
df2 = self.get_map_stat()
df = pd.concat([df1, df2], axis=0, sort=False)
df.to_csv(figure1_txt, '\t', header=True, index=True)
return df
def figure2_read_anno(self, genome, group='homer'):
"""Return categories of mapped reads
# function
df = bed_annotator(args.i.name, args.g, args.t, path_data)
"""
logging.info('figure2 reads annotation')
path_report = self.project_subpath['report_path']
project_name = self.project_name
figure2_path = os.path.join(path_report, 'read_annotation')
figure2_txt = os.path.join(figure2_path, 'read_annotation.txt')
assert is_path(figure2_path)
bed_files = self.get_bam_file(bam2bed=True)
if len(bed_files) == 0:
            logging.error('failed, bed files not found: %s' % self.project_subpath['align_path'])
return None
# annotate files
dfx = []
for bed in bed_files:
df_anno = bed_annotator(bed, genome, group).drop(columns=['sample'])
dfx.append(df_anno)
df = | pd.concat(dfx, axis=1) | pandas.concat |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
import pandas as pd
import altair as alt
from microsetta_public_api.repo._alpha_repo import AlphaRepo
from microsetta_public_api.repo._metadata_repo import MetadataRepo
from microsetta_public_api.models._alpha import Alpha
from microsetta_public_api.api.metadata import _format_query, \
_validate_query, _filter_matching_ids
from microsetta_public_api.utils._utils import jsonify
def plot_alpha_filtered(alpha_metric=None, percentiles=None,
sample_id=None, **kwargs):
repo = MetadataRepo()
query = _format_query(kwargs)
is_invalid = _validate_query(kwargs, repo)
if is_invalid:
return is_invalid
return _plot_alpha_percentiles_querybuilder(alpha_metric, percentiles,
query, repo, sample_id)
def plot_alpha_filtered_json_query(body, alpha_metric=None, percentiles=None,
sample_id=None):
repo = MetadataRepo()
return _plot_alpha_percentiles_querybuilder(alpha_metric, percentiles,
body, repo, sample_id)
def _plot_alpha_percentiles_querybuilder(alpha_metric, percentiles, query,
repo, sample_id):
error_code, error_response, matching_ids = _filter_ids(repo, alpha_metric,
query, sample_id)
# TODO ideally these would raise an exception lower in the stack and
# then be handled by an exception handler, but for now they are clunky
# to refactor due to execution flow interruption
if error_response:
return error_response, error_code
if len(matching_ids) <= 1:
return jsonify(text='Did not find more than 1 ID\'s matching '
'request. Plot would be nonsensical.'), 422
alpha_summary, sample_diversity = _get_alpha_info(alpha_metric,
matching_ids,
percentiles, sample_id)
chart = _plot_percentiles_plot(alpha_metric, alpha_summary,
sample_diversity)
return jsonify(**chart.to_dict()), 200
def _plot_percentiles_plot(metric, summary, sample_value=None):
df = pd.DataFrame({'percentile': summary['percentile'],
'values': summary['percentile_values'],
}
)
chart = alt.Chart(df).encode(
x=alt.X("values", stack=None, title=metric),
y=alt.Y("percentile", title='Percentile'),
)
chart = chart.mark_area(opacity=0.3) + chart.mark_line() + \
chart.mark_point()
if sample_value:
# get_alpha_diversity returns a pd.Series, so subset it
sample_df = pd.DataFrame({'sample-value': [sample_value]})
import os
import ast
import math
import json
import logging
import pathlib
import numpy as np
import pandas as pd
import opendssdirect as dss
from .pydss_parameters import *
from jade.utils.timing_utils import track_timing, Timer
from disco import timer_stats_collector
from disco.enums import LoadMultiplierType
from disco.exceptions import (
OpenDssCompileError,
OpenDssConvergenceError,
UpgradesExternalCatalogRequired,
UpgradesExternalCatalogMissingObjectDefinition,
InvalidOpenDssElementError,
)
logger = logging.getLogger(__name__)
@track_timing(timer_stats_collector)
def reload_dss_circuit(dss_file_list, commands_list=None, **kwargs):
"""This function clears the circuit and loads dss files and commands.
Also solves the circuit and checks for convergence errors
Parameters
----------
dss_file_list
commands_list
Returns
-------
"""
logger.info("-> Reloading OpenDSS circuit")
check_dss_run_command("clear")
if dss_file_list is None:
raise Exception("No OpenDSS files have been passed to be loaded.")
for dss_file in dss_file_list:
logger.info(f"Redirecting {dss_file}.")
check_dss_run_command(f"Redirect {dss_file}")
dc_ac_ratio = kwargs.get('dc_ac_ratio', None)
if dc_ac_ratio is not None:
change_pv_pctpmpp(dc_ac_ratio=dc_ac_ratio)
if commands_list is not None:
logger.info(f"Running {len(commands_list)} dss commands")
for command_string in commands_list:
check_dss_run_command(command_string)
if "new " in command_string.lower():
check_dss_run_command("CalcVoltageBases")
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve:
pydss_params = define_initial_pydss_settings(**kwargs)
circuit_solve_and_check(raise_exception=True, **pydss_params)
return pydss_params
else:
circuit_solve_and_check(raise_exception=True)
return kwargs
def run_selective_master_dss(master_filepath, **kwargs):
"""This function executes master.dss file line by line and ignores some commands that Solve yearly mode,
export or plot data.
Parameters
----------
master_filepath
Returns
-------
"""
run_dir = os.getcwd()
check_dss_run_command("Clear")
# logger.info("-->Redirecting master file:")
# check_dss_run_command(f"Redirect {master_filepath}")
# do this instead of redirect master to ignore some lines (e.g., that solve for the whole year)
os.chdir(os.path.dirname(master_filepath))
logger.debug(master_filepath)
with open(master_filepath, "r") as fr:
tlines = fr.readlines()
for line in tlines:
if ('Solve'.lower() in line.lower()) or ('Export'.lower() in line.lower()) or ('Plot'.lower() in line.lower()):
logger.info(f"Skipping this line: {line}")
continue
else:
check_dss_run_command(f"{line}")
circuit_solve_and_check(raise_exception=True, **kwargs)
os.chdir(run_dir)
return
@track_timing(timer_stats_collector)
def circuit_solve_and_check(raise_exception=False, **kwargs):
"""This function solves the circuit (both OpenDSS and PyDSS-if enabled)
and can raise exception if convergence error occurs
Parameters
----------
raise_exception
kwargs
Returns
-------
"""
calcvoltagebases = kwargs.pop("calcvoltagebases", False)
if calcvoltagebases:
check_dss_run_command("CalcVoltageBases")
dss_pass_flag = dss_solve_and_check(raise_exception=raise_exception)
pass_flag = dss_pass_flag
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve: # if pydss solver is also to be used
pydss_pass_flag = pydss_solve_and_check(raise_exception=raise_exception, **kwargs)
pass_flag = dss_pass_flag and pydss_pass_flag
return pass_flag
def dss_solve_and_check(raise_exception=False):
"""This function solves OpenDSS and returns bool flag which shows if it has converged or not.
Parameters
----------
raise_exception
Returns
-------
bool
"""
dss.Solution.Solve()
logger.debug("Solving circuit using OpenDSS")
# check_dss_run_command('CalcVoltageBases')
dss_pass_flag = dss.Solution.Converged()
if not dss_pass_flag:
logger.info(f"OpenDSS Convergence Error")
if raise_exception:
raise OpenDssConvergenceError("OpenDSS solution did not converge")
return dss_pass_flag
def dss_run_command_list(command_list):
for command_string in command_list:
check_dss_run_command(command_string)
return
def write_text_file(string_list, text_file_path):
"""This function writes the string contents of a list to a text file
Parameters
----------
string_list
text_file_path
Returns
-------
"""
pathlib.Path(text_file_path).write_text("\n".join(string_list))
def create_upgraded_master_dss(dss_file_list, upgraded_master_dss_filepath):
"""Function to create master dss with redirects to upgrades dss file.
The redirect paths in this file are relative to the file"""
command_list = []
for filename in dss_file_list:
rel_filename = os.path.relpath(filename, os.path.dirname(upgraded_master_dss_filepath))
command_list.append(f"Redirect {rel_filename}")
return command_list
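# Illustrative sketch of the helper above (hypothetical, POSIX-style paths): redirects are
# written relative to the folder of the new master file. Defined here but never called.
def _example_create_upgraded_master_dss():
    # Expected result: ["Redirect ../Master.dss", "Redirect thermal_upgrades.dss"]
    return create_upgraded_master_dss(
        ["/feeder/Master.dss", "/feeder/upgrades/thermal_upgrades.dss"],
        "/feeder/upgrades/upgraded_master.dss",
    )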
def create_dataframe_from_nested_dict(user_dict, index_names):
"""This function creates dataframe from a nested dictionary
Parameters
----------
user_dict
index_names
Returns
-------
DataFrame
"""
df = pd.DataFrame.from_dict({(i, j): user_dict[i][j]
for i in user_dict.keys()
for j in user_dict[i].keys()},
orient='index')
df.index.names = index_names
return df.reset_index()
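# Illustrative sketch (hypothetical element names and loading values): the two outer key
# levels of the nested dict are flattened into regular columns. Defined but never called.
def _example_create_dataframe_from_nested_dict():
    nested = {"xfmr1": {"initial": {"loading": 1.2}, "final": {"loading": 0.9}}}
    # Yields rows ("xfmr1", "initial", 1.2) and ("xfmr1", "final", 0.9) with
    # columns "name", "stage" and "loading".
    return create_dataframe_from_nested_dict(nested, index_names=["name", "stage"])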
def get_dictionary_of_duplicates(df, subset, index_field):
"""This creates a mapping dictionary of duplicate indices in a dataframe
Parameters
----------
df
subset
index_field
Returns
-------
Dictionary
"""
df.set_index(index_field, inplace=True)
df = df[df.duplicated(keep=False, subset=subset)]
tuple_list = df.groupby(subset).apply(lambda x: tuple(x.index)).tolist()
mapping_dict = {v: tup[0] for tup in tuple_list for v in tup}
return mapping_dict
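# Illustrative sketch (hypothetical linecode catalog): rows that are duplicated over the
# chosen subset are all mapped to one representative name. Defined but never called.
def _example_get_dictionary_of_duplicates():
    catalog = pd.DataFrame({"name": ["lc1", "lc2", "lc3"],
                            "normamps": [400.0, 400.0, 600.0],
                            "kV": [12.47, 12.47, 12.47]})
    # "lc1" and "lc2" share identical ratings, so the result is {"lc1": "lc1", "lc2": "lc1"};
    # the unique "lc3" does not appear in the mapping.
    return get_dictionary_of_duplicates(catalog, subset=["normamps", "kV"], index_field="name")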
def get_scenario_name(enable_pydss_solve, pydss_volt_var_model):
"""This function determines the controller scenario
Parameters
----------
enable_pydss_solve : bool
pydss_volt_var_model
Returns
-------
str
"""
if enable_pydss_solve:
# scenario = pydss_volt_var_model.control1 # TODO can read in name instead
scenario = "control_mode"
else:
scenario = "pf1"
return scenario
@track_timing(timer_stats_collector)
def change_pv_pctpmpp(dc_ac_ratio):
"""This function changes PV system pctpmpp based on passed dc-ac ratio
newpctpmpp = oldpctpmpp / dc_ac_ratio
"""
dss.PVsystems.First()
for i in range(dss.PVsystems.Count()):
newpctpmpp = int(dss.Properties.Value('%Pmpp')) / dc_ac_ratio
command_string = f"Edit PVSystem.{dss.PVsystems.Name()} %Pmpp={newpctpmpp}"
check_dss_run_command(command_string)
dss.PVsystems.Next()
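# Worked example of the rescaling above (numbers are arbitrary): a PV system defined with
# %Pmpp = 100 and a dc_ac_ratio of 1.25 is edited to %Pmpp = 100 / 1.25 = 80.0.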
def get_feeder_stats(dss):
"""This function gives metadata stats for a feeder
Parameters
----------
dss
Returns
-------
dict
"""
load_kw = 0
load_kVABase = 0
pv_kw = 0
pv_kVARated = 0
load_df = dss.utils.loads_to_dataframe()
if len(load_df) > 0:
load_kw = load_df['kW'].sum()
load_kVABase = load_df['kVABase'].sum()
pv_df = dss.utils.pvsystems_to_dataframe()
if len(pv_df) > 0:
pv_kw = pv_df['kW'].sum()
pv_kVARated = pv_df['kVARated'].sum()
data_dict = {
'total_load(kVABase)': load_kVABase,
'total_load(kW)': load_kw,
'total_PV(kW)': pv_kw,
'total_PV(kVARated)': pv_kVARated,
}
return data_dict
def get_upgrade_stage_stats(dss, upgrade_stage, upgrade_type, xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
"""This function gives upgrade stage stats for a feeder
upgrade_stage can be Initial or Final
upgrade_type can be thermal or voltage
"""
final_dict = {"stage": upgrade_stage, "upgrade_type": upgrade_type}
ckt_info_dict = get_circuit_info()
final_dict["feeder_components"] = ckt_info_dict
final_dict["feeder_components"].update({
"num_nodes": dss.Circuit.NumNodes(),
"num_loads": dss.Loads.Count(),
"num_lines": dss.Lines.Count(),
"num_transformers": dss.Transformers.Count(),
"num_pv_systems": dss.PVsystems.Count(),
"num_capacitors": dss.Capacitors.Count(),
"num_regulators": dss.RegControls.Count(),
} )
equipment_dict = combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs)
final_dict.update(equipment_dict)
return final_dict
def combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
line_properties = kwargs.get("line_properties", None)
xfmr_properties = kwargs.get("xfmr_properties", None)
voltage_properties = kwargs.get("voltage_properties", None)
final_dict = {}
if line_properties is None:
line_properties = ['name', 'phases','normamps', 'kV', 'line_placement', 'length', 'units', 'max_amp_loading',
'max_per_unit_loading', 'status']
if xfmr_properties is None:
xfmr_properties = ['name', 'phases', 'windings', 'conns', 'kVs', 'kVAs', 'amp_limit_per_phase','max_amp_loading',
'max_per_unit_loading', 'status']
if voltage_properties is None:
voltage_properties = ['name', 'Max per unit voltage', 'Min per unit voltage', 'Overvoltage violation',
'Max voltage_deviation', 'Undervoltage violation', 'Min voltage_deviation']
# some file reformatting
if "conns" in xfmr_properties:
xfmr_loading_df["conns"] = xfmr_loading_df["conns"].apply(ast.literal_eval)
if "kVs" in xfmr_properties:
xfmr_loading_df["kVs"] = xfmr_loading_df["kVs"].apply(ast.literal_eval)
if "windings" in xfmr_properties:
xfmr_loading_df["windings"] = xfmr_loading_df["windings"].astype(int)
final_dict.update({"transformer_loading": xfmr_loading_df[xfmr_properties].to_dict(orient="records")})
final_dict.update({"line_loading": line_loading_df[line_properties].to_dict(orient="records")})
final_dict.update({"bus_voltage": bus_voltages_df[voltage_properties].to_dict(orient="records")})
return final_dict
def get_circuit_info():
"""This collects circuit information: source bus, feeder head info, substation xfmr information
Returns
-------
Dictionary
"""
data_dict = {}
dss.Vsources.First()
data_dict['source_bus'] = dss.CktElement.BusNames()[0].split(".")[0]
data_dict["feeder_head_name"] = dss.Circuit.Name()
dss.Circuit.SetActiveBus(data_dict['source_bus'])
data_dict["feeder_head_basekv"] = dss.Bus.kVBase()
data_dict["source_num_nodes"] = dss.Bus.NumNodes()
data_dict["total_num_buses_in_circuit"] = len(dss.Circuit.AllBusNames())
if data_dict["source_num_nodes"] > 1:
data_dict["feeder_head_basekv"] = round(data_dict["feeder_head_basekv"] * math.sqrt(3), 1)
data_dict["substation_xfmr"] = None
all_xfmr_df = get_thermal_equipment_info(compute_loading=False, equipment_type="transformer")
all_xfmr_df["substation_xfmr_flag"] = all_xfmr_df.apply(lambda x: int(
data_dict["source_bus"].lower() in x['bus_names_only']), axis=1)
if len(all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] == True]) > 0:
data_dict["substation_xfmr"] = all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] ==
True].to_dict(orient='records')[0]
data_dict["substation_xfmr"]["kVs"] = ast.literal_eval(data_dict["substation_xfmr"]["kVs"])
# this checks if the voltage kVs are the same for the substation transformer
data_dict["substation_xfmr"]["is_autotransformer_flag"] = len(set(data_dict["substation_xfmr"]["kVs"])) <= 1
return data_dict
def create_opendss_definition(config_definition_dict, action_type="New", property_list=None):
"""This function creates an opendss element definition for any generic equipment
Returns
-------
str
"""
command_string = f"{action_type} {config_definition_dict['equipment_type']}.{config_definition_dict['name']}"
logger.debug(f"New {config_definition_dict['equipment_type']}.{config_definition_dict['name']} being defined")
# these properties contain data (refer OpenDSS manual for more information on these parameters)
if property_list is None:
property_list = list(set(config_definition_dict.keys()) - {"name", "equipment_type"})
empty_field_values = ["----", "nan", "NaN", "None", None, np.nan]
for property_name in property_list:
if isinstance(config_definition_dict[property_name], float):
if np.isnan(config_definition_dict[property_name]):
continue
if config_definition_dict[property_name] in empty_field_values:
continue
# if the value is not empty and is not nan, only then add it into the command string
temp_s = f" {property_name}={config_definition_dict[property_name]}"
command_string = command_string + temp_s
return command_string
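# Illustrative sketch (hypothetical linecode fields): empty/NaN fields are skipped and the
# remaining properties are appended as name=value pairs. Defined here but never called.
def _example_create_opendss_definition():
    config = {"equipment_type": "linecode", "name": "lc_new", "normamps": 600, "nphases": 3}
    # Produces "New linecode.lc_new normamps=600 nphases=3" (property order may vary,
    # because the property list is derived from a set).
    return create_opendss_definition(config)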
def ensure_line_config_exists(chosen_option, new_config_type, external_upgrades_technical_catalog):
"""This function check if a line config exists in the network.
If it doesn't exist, it checks the external catalog (if available) and returns a new dss definition string.
Returns
-------
str
"""
existing_config_dict = {"linecode": get_line_code(), "geometry": get_line_geometry()}
new_config_name = chosen_option[new_config_type].lower()
# if linecode or linegeometry is not present in existing network definitions
if not existing_config_dict[new_config_type]["name"].str.lower().isin([new_config_name]).any():
# add definition for linecode or linegeometry
if external_upgrades_technical_catalog is None:
raise UpgradesExternalCatalogRequired(f"External upgrades technical catalog not available to determine line config type")
external_config_df = pd.DataFrame(external_upgrades_technical_catalog[new_config_type])
if external_config_df["name"].str.lower().isin([new_config_name]).any():
config_definition_df = external_config_df.loc[external_config_df["name"] == new_config_name]
config_definition_dict = dict(config_definition_df.iloc[0])
if config_definition_dict["normamps"] != chosen_option["normamps"]:
logger.warning(f"Mismatch between noramps for linecode {new_config_name} and chosen upgrade option normamps: {chosen_option['name']}")
# check format of certain fields
matrix_fields = [s for s in config_definition_dict.keys() if 'matrix' in s]
for field in matrix_fields:
config_definition_dict[field] = config_definition_dict[field].replace("'","")
config_definition_dict[field] = config_definition_dict[field].replace("[","(")
config_definition_dict[field] = config_definition_dict[field].replace("]",")")
command_string = create_opendss_definition(config_definition_dict=config_definition_dict)
else:
raise UpgradesExternalCatalogMissingObjectDefinition(
f"{new_config_type} definition for {new_config_name} not found in external catalog."
)
else:
command_string = None
return command_string
def get_present_loading_condition():
""" Get present loading condition for all loads
Returns
-------
DataFrame
"""
load_dict = {}
dss.Circuit.SetActiveClass("Load")
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
load_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next Load...
flag = dss.ActiveClass.Next()
load_df = pd.DataFrame.from_dict(load_dict, "index")
return load_df
def get_present_storage_condition():
""" Get present operating condition for all storage
Returns
-------
DataFrame
"""
storage_dict = {}
dss.Circuit.SetActiveClass('Storage')
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
storage_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next ...
flag = dss.ActiveClass.Next()
storage_df = pd.DataFrame.from_dict(storage_dict, "index")
return storage_df
def get_present_pvgeneration():
""" Get present generation for all pv systems
Returns
-------
DataFrame
"""
pv_dict = {}
dss.Circuit.SetActiveClass("PVSystem")
flag = dss.ActiveClass.First()
while flag:
pv_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kvar': float(dss.Properties.Value("kvar")),
'Irradiance': float(dss.Properties.Value("Irradiance")),
'connection': dss.Properties.Value("conn"),
'Pmpp': float(dss.Properties.Value("Pmpp")),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Voltages': dss.CktElement.Voltages(),
'VoltagesMagAng': dss.CktElement.VoltagesMagAng(),
'VoltagesMag': float(dss.CktElement.VoltagesMagAng()[0]),
}
flag = dss.ActiveClass.Next() > 0
pv_df = pd.DataFrame.from_dict(pv_dict, "index")
return pv_df
def get_all_transformer_info_instance(upper_limit=None, compute_loading=True):
"""This collects transformer information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("transformer")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df[["wdg", "phases"]] = all_df[["wdg", "phases"]].astype(int)
float_fields = ["kV", "kVA", "normhkVA", "emerghkVA", "%loadloss", "%noloadloss", "XHL", "XHT", "XLT", "%R",
"Rneut", "Xneut", "X12", "X13", "X23", "RdcOhms"]
all_df[float_fields] = all_df[float_fields].astype(float)
# define empty new columns
all_df['bus_names_only'] = None
all_df["amp_limit_per_phase"] = np.nan
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
# convert type from list to tuple since they are hashable objects (and can be indexed)
all_df.at[index, "kVs"] = [float(a) for a in row["kVs"]]
all_df.at[index, "kVAs"] = [float(a) for a in row["kVAs"]]
all_df.at[index, "Xscarray"] = [float(a) for a in row["Xscarray"]]
all_df.at[index, "%Rs"] = [float(a) for a in row["%Rs"]]
all_df.at[index, "bus_names_only"] = [a.split(".")[0].lower() for a in row["buses"]]
# first winding is considered primary winding
primary_kv = float(row["kVs"][0])
primary_kva = float(row["kVAs"][0])
if row["phases"] > 1:
amp_limit_per_phase = primary_kva / (primary_kv * math.sqrt(3))
elif row["phases"] == 1:
amp_limit_per_phase = primary_kva / primary_kv
else:
raise InvalidOpenDssElementError(f"Incorrect number of phases for transformer {row['name']}")
all_df.at[index, "amp_limit_per_phase"] = amp_limit_per_phase
if compute_loading:
if upper_limit is None:
raise Exception("Transformer upper limit is to be passed to function to compute transformer loading")
dss.Circuit.SetActiveElement("Transformer.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]] # extract elements based on num of ph
xfmr_current_magnitude = extract_magang[::2]
max_amp_loading = max(xfmr_current_magnitude)
max_per_unit_loading = round(max_amp_loading / amp_limit_per_phase, 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
# convert lists to string type (so they can be set as dataframe index later)
all_df[['conns', 'kVs']] = all_df[['conns', 'kVs']].astype(str)
all_df = all_df.reset_index(drop=True).set_index('name')
return all_df.reset_index()
def add_info_line_definition_type(all_df):
all_df["line_definition_type"] = "line_definition"
all_df.loc[all_df["linecode"] != "", "line_definition_type"] = "linecode"
all_df.loc[all_df["geometry"] != "", "line_definition_type"] = "geometry"
return all_df
def determine_line_placement(line_series):
""" Distinguish between overhead and underground cables
currently there is no way to distinguish directy using opendssdirect/pydss etc.
It is done here using property 'height' parameter and if string present in name
Parameters
----------
line_series
Returns
-------
dict
"""
info_dict = {}
info_dict["line_placement"] = None
if line_series["line_definition_type"] == "geometry":
dss.Circuit.SetActiveClass("linegeometry")
dss.ActiveClass.Name(line_series["geometry"])
h = float(dss.Properties.Value("h"))
info_dict["h"] = 0
if h >= 0:
info_dict["line_placement"] = "overhead"
else:
info_dict["line_placement"] = "underground"
else:
if ("oh" in line_series["geometry"].lower()) or ("oh" in line_series["linecode"].lower()):
info_dict["line_placement"] = "overhead"
elif ("ug" in line_series["geometry"].lower()) or ("ug" in line_series["linecode"].lower()):
info_dict["line_placement"] = "underground"
else:
info_dict["line_placement"] = None
return info_dict
def get_all_line_info_instance(upper_limit=None, compute_loading=True, ignore_switch=True):
"""This collects line information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("line")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df["phases"] = all_df["phases"].astype(int)
all_df[["normamps", "length"]] = all_df[["normamps", "length"]].astype(float)
all_df = add_info_line_definition_type(all_df)
# define empty new columns
all_df["kV"] = np.nan
all_df["h"] = np.nan
all_df["line_placement"] = ""
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
dss.Circuit.SetActiveBus(row["bus1"])
kv_b1 = dss.Bus.kVBase()
dss.Circuit.SetActiveBus(row["bus2"])
kv_b2 = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
if round(kv_b1) != round(kv_b2):
raise InvalidOpenDssElementError("To and from bus voltages ({} {}) do not match for line {}".format(
kv_b2, kv_b1, row['name']))
all_df.at[index, "kV"] = kv_b1
# Distinguish between overhead and underground cables
# currently there is no way to distinguish directly using opendssdirect/pydss etc.
# It is done here using property 'height' parameter and if string present in name
placement_dict = determine_line_placement(row)
for key in placement_dict.keys():
all_df.at[index, key] = placement_dict[key]
# if line loading is to be computed
if compute_loading:
if upper_limit is None:
raise Exception("Line upper limit is to be passed to function to compute line loading")
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]]
line_current = extract_magang[::2]
max_amp_loading = max(line_current)
max_per_unit_loading = round(max_amp_loading / row["normamps"], 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
all_df = all_df.reset_index(drop=True).set_index('name')
all_df["kV"] = all_df["kV"].round(5)
# add units to switch length (needed to plot graph). By default, length of switch is taken as max
all_df.loc[(all_df.units == 'none') & (all_df.Switch == True), 'units'] = 'm'
# if switch is to be ignored
if ignore_switch:
all_df = all_df.loc[all_df['Switch'] == False]
return all_df.reset_index()
def compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max"):
"""This function compares all dataframes in a given dictionary based on a deciding column name
Returns
-------
Dataframe
"""
summary_df = pd.DataFrame()
for df_name in comparison_dict.keys():
summary_df[df_name] = comparison_dict[df_name][deciding_column_name]
if comparison_type == "max":
label_df = summary_df.idxmax(axis=1) # find dataframe name that has max
elif comparison_type == "min":
label_df = summary_df.idxmin(axis=1) # find dataframe name that has min
else:
raise Exception(f"Unknown comparison type {comparison_type} passed.")
final_list = []
for index, label in label_df.iteritems(): # index is element name
temp_dict = dict(comparison_dict[label].loc[index])
temp_dict.update({"name": index})
final_list.append(temp_dict)
final_df = pd.DataFrame(final_list)
return final_df
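# Illustrative sketch (hypothetical loading results): each entry of comparison_dict is an
# equipment dataframe indexed by element name; for every element, the scenario with the
# worst value of the deciding column supplies the kept row. Defined here but never called.
def _example_compare_multiple_dataframes():
    with_pv = pd.DataFrame({"max_per_unit_loading": [1.1, 0.4]}, index=["line1", "line2"])
    without_pv = pd.DataFrame({"max_per_unit_loading": [0.9, 0.7]}, index=["line1", "line2"])
    # "line1" keeps its row from with_pv (1.1 > 0.9); "line2" keeps its row from
    # without_pv (0.7 > 0.4).
    return compare_multiple_dataframes({"with_pv": with_pv, "without_pv": without_pv},
                                       "max_per_unit_loading", comparison_type="max")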
@track_timing(timer_stats_collector)
def get_thermal_equipment_info(compute_loading, equipment_type, upper_limit=None, ignore_switch=False, **kwargs):
"""This function determines the thermal equipment loading (line, transformer), based on timepoint multiplier
Returns
-------
DataFrame
"""
timepoint_multipliers = kwargs.get("timepoint_multipliers", None)
multiplier_type = kwargs.get("multiplier_type", LoadMultiplierType.ORIGINAL)
# if there are no multipliers, run on rated load i.e. multiplier=1. 0
# if compute_loading is false, then just run once (no need to check multipliers)
if (timepoint_multipliers is None) or (not compute_loading) or (multiplier_type == LoadMultiplierType.ORIGINAL):
if compute_loading and multiplier_type != LoadMultiplierType.ORIGINAL:
apply_uniform_timepoint_multipliers(multiplier_name=1, field="with_pv", **kwargs)
if equipment_type == "line":
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type == "transformer":
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
return loading_df
if multiplier_type == LoadMultiplierType.UNIFORM:
comparison_dict = {}
for pv_field in timepoint_multipliers["load_multipliers"].keys():
logger.debug(pv_field)
for multiplier_name in timepoint_multipliers["load_multipliers"][pv_field]:
logger.debug("Multipler name: %s", multiplier_name)
# this changes the dss network load and pv
apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)
if equipment_type.lower() == "line":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type.lower() == "transformer":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
loading_df.set_index("name", inplace=True)
comparison_dict[pv_field+"_"+str(multiplier_name)] = loading_df
# compare all dataframe, and create one that contains all worst loading conditions (across all multiplier conditions)
loading_df = compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max")
else:
raise Exception(f"Undefined multiplier_type {multiplier_type} passed.")
return loading_df
def get_regcontrol_info(correct_PT_ratio=False, nominal_voltage=None):
"""This collects enabled regulator control information.
If correcting PT ratio, the following information is followed (based on OpenDSS documentation)
PT ratio: # If the winding is Wye, the line-to-neutral voltage is used. Else, the line-to-line voltage is used.
# Here, bus kV is taken from Bus.kVBase
Bus base kV: Returns L-L voltages for 2- and 3-phase. Else for 1-ph, return L-N voltage
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("regcontrol")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ['winding', 'vreg', 'band', 'ptratio', 'delay']
all_df[float_columns] = all_df[float_columns].astype(float)
all_df['at_substation_xfmr_flag'] = False # by default, reg control is considered to be not at substation xfmr
ckt_info_dict = get_circuit_info()
sub_xfmr_present = False
sub_xfmr_name = None
if ckt_info_dict['substation_xfmr'] is not None:
sub_xfmr_present = True
sub_xfmr_name = ckt_info_dict['substation_xfmr']['name']
if correct_PT_ratio:
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct regcontrol PT ratio.")
all_df['old_ptratio'] = all_df['ptratio']
for index, row in all_df.iterrows():
dss.Circuit.SetActiveElement("Regcontrol.{}".format(row["name"]))
reg_bus = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "reg_bus"] = reg_bus
dss.Circuit.SetActiveBus(reg_bus)
all_df.at[index, "bus_num_phases"] = dss.CktElement.NumPhases()
all_df.at[index, "bus_kv"] = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Transformer.{}".format(row["transformer"]))
all_df.at[index, "transformer_kva"] = float(dss.Properties.Value("kva"))
dss.Transformers.Wdg(1) # setting winding to 1, to get kV for winding 1
all_df.at[index, "transformer_kv"] = dss.Transformers.kV()
all_df.at[index, "transformer_conn"] = dss.Properties.Value("conn").replace(" ", "") # opendss returns conn with a space
all_df.at[index, "transformer_bus1"] = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "transformer_bus2"] = dss.CktElement.BusNames()[1].split(".")[0]
if correct_PT_ratio:
if (all_df.loc[index]["bus_num_phases"] > 1) and (all_df.loc[index]["transformer_conn"].lower() == "wye"):
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000 / math.sqrt(3)
else:
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000
# kV_to_be_used = dss.Bus.kVBase() * 1000
all_df.at[index, "ptratio"] = kV_to_be_used / nominal_voltage
if sub_xfmr_present and (row["transformer"] == sub_xfmr_name): # if reg control is at substation xfmr
all_df.at[index, 'at_substation_xfmr_flag'] = True
all_df = all_df.reset_index(drop=True).set_index('name')
all_df = all_df.loc[all_df['enabled'] == True]
return all_df.reset_index()
def get_capacitor_info(nominal_voltage=None, correct_PT_ratio=False):
"""
This collects capacitor information.
For correcting PT ratio, the following information and definitions are followed:
# cap banks are 3 phase, 2 phase or 1 phase. 1 phase caps will have LN voltage
# PT ratio: Ratio of the PT that converts the monitored voltage to the control voltage.
# If the capacitor is Wye, the 1st phase line-to-neutral voltage is monitored.
# Else, the line-to-line voltage (1st - 2nd phase) is monitored.
# Capacitor kv: Rated kV of the capacitor (not necessarily same as bus rating).
# For Phases=2 or Phases=3, it is line-to-line (phase-to-phase) rated voltage.
# For all other numbers of phases, it is actual rating. (For Delta connection this is always line-to-line rated voltage).
This function doesn't currently check whether the object is "enabled".
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capacitor")
if len(all_df) == 0:
return pd.DataFrame()
all_df["capacitor_name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["phases", "kv"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("capacitor_name")
# collect capcontrol information to combine with capcontrols
capcontrol_df = get_cap_control_info()
capcontrol_df.rename(columns={'name': 'capcontrol_name', 'capacitor': 'capacitor_name', 'type': 'capcontrol_type',
'equipment_type': 'capcontrol_present'}, inplace=True)
capcontrol_df = capcontrol_df.set_index("capacitor_name")
# with capacitor name as index, concatenate capacitor information with cap controls
# TODO are any other checks needed before concatenating dataframes? i.e. if capacitor is not present
all_df = pd.concat([all_df, capcontrol_df], axis=1)
all_df.index.name = 'capacitor_name'
all_df = all_df.reset_index().set_index('capacitor_name')
if correct_PT_ratio and (len(capcontrol_df) > 0):
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct capacitor bank PT ratio.")
all_df['old_PTratio'] = all_df['PTratio']
# iterate over all capacitors
for index, row in all_df.iterrows():
all_df.at[index, "kvar"] = [float(a) for a in row["kvar"]][0]
# if capcontrol type is empty, then that capacitor does not have controls
# correct PT ratios for existing cap controls
if correct_PT_ratio and (len(capcontrol_df) > 0):
if row["phases"] > 1 and row["conn"].lower() == "wye":
kv_to_be_used = (row['kv'] * 1000) / math.sqrt(3)
else:
kv_to_be_used = row['kv'] * 1000
all_df.at[index, "PTratio"] = kv_to_be_used / nominal_voltage
return all_df.reset_index()
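# Worked example of the PT-ratio correction above (hypothetical ratings): a 3-phase wye
# capacitor rated 12.47 kV monitored against nominal_voltage = 120 V uses
# 12470 / sqrt(3), i.e. about 7199.6 V, giving a corrected PTratio of about 60.0.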
def get_cap_control_info():
"""This collects capacitor control information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capcontrol")
if len(all_df) == 0:
capcontrol_columns = ['name', 'capacitor', 'type', 'equipment_type']
return pd.DataFrame(columns=capcontrol_columns)
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["CTPhase", "CTratio", "DeadTime", "Delay", "DelayOFF", "OFFsetting", "ONsetting", "PTratio",
"Vmax", "Vmin"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("name")
return all_df.reset_index()
def get_line_geometry():
"""This collects all line geometry information
Returns
-------
DataFrame
"""
active_class_name = 'linegeometry'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_line_code():
"""This collects all line codes information
Returns
-------
DataFrame
"""
active_class_name = 'linecode'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
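def _demo_truncation_helpers():
    # Illustrative sketch, not a collected test: shows what the repr-inspection
    # helpers above react to. A frame wider than display.width wraps into
    # column blocks whose continued lines end with a backslash (picked up by
    # has_expanded_repr), while a small display.max_columns forces a literal
    # "..." column into the header (picked up by
    # has_horizontally_truncated_repr).
    demo = DataFrame(np.arange(90).reshape(3, 30))
    with option_context("display.expand_frame_repr", True, "display.width", 40):
        wrapped = has_expanded_repr(demo)
    with option_context("display.expand_frame_repr", False, "display.max_columns", 5):
        truncated = has_horizontally_truncated_repr(demo)
    return wrapped, truncated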
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
                    pd.Period("2011-01", freq="M"),
import pycurl
from io import BytesIO
import json
import datetime
import pandas as pd
myaddress = input('Enter Bitcoin Address: ')
btcval = 100000000.0 # in santoshis
block_time_in_min = 10
block_time_in_sec = block_time_in_min*60
def getBalance(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/unspent?active=%s" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
balance = 0.0
# print("getreq = %s" % (getreq.getvalue()))
allunspenttx = json.loads(strbuf.getvalue())['unspent_outputs']
for eachtx in allunspenttx:
balance += eachtx['value']
return balance
def getTxnHistoryOfAddress(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/address/%s?format=json" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
new_txn_list = []
alltxns = json.loads(strbuf.getvalue())['txs']
for eachtxn in alltxns:
new_txn = {}
input_list = eachtxn['inputs']
input_value = 0
address_input_value = 0
for each_input in input_list:
input_value += each_input['prev_out']['value']
if each_input['prev_out']['addr'] == address:
address_input_value += each_input['prev_out']['value']
output_list = eachtxn['out']
output_value = 0
address_output_value = 0
for each_output in output_list:
output_value += each_output['value']
if each_output['addr'] == address:
address_output_value += each_output['value']
if address_input_value > address_output_value:
new_txn['credit_in_btc'] = (address_input_value - address_output_value) / btcval
else:
new_txn['debit_in_btc'] = (address_output_value - address_input_value) / btcval
network_fees = input_value - output_value
new_txn['network_fees'] = network_fees / btcval
new_txn['network_fees_in_inr'] = new_txn['network_fees'] * getCurrentSellPriceInInr()
dt = datetime.datetime.fromtimestamp(eachtxn['time'])
new_txn['date_time'] = dt.strftime("%d-%B-%Y %H:%M:%S")
new_txn_list.append(new_txn)
return new_txn_list
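# Each returned record is a plain dict holding either 'credit_in_btc' or
# 'debit_in_btc' (depending on whether the address's inputs or outputs dominate
# in that transaction), plus 'network_fees', 'network_fees_in_inr' and
# 'date_time', e.g. (values illustrative only):
#   {'debit_in_btc': 0.015, 'network_fees': 0.0002,
#    'network_fees_in_inr': 450.0, 'date_time': '01-January-2021 12:30:45'}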
def getCurrentBlockHeight():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/blocks?format=json")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_block_height = json.loads(strbuf.getvalue())['blocks'][0]['height']
return current_block_height
def getTxCountInBlock(block_height: int):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/block-height/%d?format=json" % (block_height))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
txlist = json.loads(strbuf.getvalue())['blocks'][0]['tx']
return len(txlist)
#def getListOfTxnsOnAddress(address: str):
#
#def getInputBitcoinInTx(txn: str):
#
#def getOutputBitcoinInTx(txn: str):
#
#def getChangeInTx(txn: str):
#
#def getNetworkFeesInTxn(txn: str):
def getTxRate(tx_count_in_block: int):
return tx_count_in_block/block_time_in_sec
# return block_time_in_sec/tx_count_in_block
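# Example: a block carrying 2500 transactions gives getTxRate(2500)
# = 2500 / 600 s ≈ 4.17 transactions per second (10-minute block assumption).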
def getAverageTxRateInLast24Hrs():
current_block_height = getCurrentBlockHeight()
min_in_a_day = 60*24
blocks_in_a_day = int(min_in_a_day/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_day, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_day
return average_tx_rate
def getAverageTxRateInLastWeek():
current_block_height = getCurrentBlockHeight()
min_in_a_week = 60*24*7
blocks_in_a_week = int(min_in_a_week/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_week, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_week
return average_tx_rate
def getAverageTxRateInLastMonth():
current_block_height = getCurrentBlockHeight()
    min_in_a_month = 60*24*30
blocks_in_a_month = int(min_in_a_month/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_month, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_month
return average_tx_rate
def getCurrentNetworkHashRate():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/hashrate")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
current_network_hash_rate = int(strbuf.getvalue()) * 10**9
return current_network_hash_rate
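# Note: the /q/hashrate endpoint used in getCurrentNetworkHashRate() reports
# the network hash rate in GH/s, hence the 10**9 scaling to hashes per second.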
def getCurrentBlockReward():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/bcperblock")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
block_reward_abs = int(strbuf.getvalue())
block_reward = block_reward_abs / btcval
return block_reward
def getCurrentBuyPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_buy_rate_in_inr = int(json.loads(strbuf.getvalue())['buy'])
return current_buy_rate_in_inr
def getCurrentSellPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
    current_sell_rate_in_inr = int(json.loads(strbuf.getvalue())['sell'])
    return current_sell_rate_in_inr
def getCurrentValueOfBitcoinInAddressInInr(address: str):
btc = getBalance(address) / btcval
price_in_inr = getCurrentSellPriceInInr()
value = btc * price_in_inr
return value
def getUnconfirmedTransactionCount():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/unconfirmedcount")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
unconfirmed_transaction_count = int(strbuf.getvalue())
return unconfirmed_transaction_count
def convertToRupeeFormat(num: float):
numstr = "%.2f" % (num)
# print("numstr = %s" % (numstr))
# print("numstr len = %s" % (len(numstr)))
commaloc = 6
while commaloc < len(numstr):
numstr = numstr[:-commaloc] + ',' + numstr[-commaloc:]
commaloc += 3
rupees = "\u20B9%s" % (numstr)
return rupees
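# Example: convertToRupeeFormat(1234567.5) applies Indian digit grouping (last
# three digits, then pairs) and returns "₹12,34,567.50".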
electricity_rates = {"rate_slabs": [{"min": 1, "max": 30, "unit_price": 3.25}, {"min": 31, "max": 100, "unit_price": 4.7}, {"min": 101, "max": 200, "unit_price": 6.25}, {"min": 201, "unit_price": 7.3}]}
def getPriceFromUnit(unit: float):
rate_slabs = electricity_rates['rate_slabs']
price = 0
for slab in rate_slabs:
if slab['min'] > unit:
            continue
        elif 'max' not in slab or slab['max'] > unit:
# if 'max' in slab:
# print("min = %.2f, max = %.2f, unit = %.2f" % (slab['min'], slab['max'], unit))
# else:
# print("min = %.2f, unit = %.2f" % (slab['min'], unit))
price += (unit - slab['min']) * slab['unit_price']
else:
price += (slab['max'] - slab['min']) * slab['unit_price']
return price
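# Worked example with the slabs above for unit = 150:
#   1-30 slab    -> (30 - 1)    * 3.25 =  94.25
#   31-100 slab  -> (100 - 31)  * 4.70 = 324.30
#   101-200 slab -> (150 - 101) * 6.25 = 306.25   (slab containing `unit`)
#   201+ slab    -> skipped, since its 'min' exceeds `unit`
# so getPriceFromUnit(150) ≈ 724.80, following the code's (max - min) width
# convention for the lower slabs.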
def getUnitFromPower(power: float):
unit = power * 24 * 30 / 1000
return unit
def getBlockMiningRatePer10Min(hashrate: int):
network_hashrate = getCurrentNetworkHashRate()
block_mining_rate = hashrate/network_hashrate
return block_mining_rate
def getBitcoinMiningRate(hashrate: int):
block_mining_rate = getBlockMiningRatePer10Min(hashrate)
mining_reward = getCurrentBlockReward()
bitcoin_mining_rate = block_mining_rate * mining_reward
return bitcoin_mining_rate
def getMiningPowerExpense(power: float):
unit = getUnitFromPower(power)
expense = getPriceFromUnit(unit)
return expense
def getBitcoinMinedPerMonth(hashrate: int):
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
return bitcoin_mined_per_month
def miningReturn(power: float, hashrate: int):
expense = getMiningPowerExpense(power)
bitcoin_mined_per_month = getBitcoinMinedPerMonth(hashrate)
revenue = bitcoin_mined_per_month * getCurrentSellPriceInInr()
profit = revenue - expense
return profit
def costOfMiningBitcoin(power: float, hashrate: int):
unit = getUnitFromPower(power)
price_per_month = getPriceFromUnit(unit)
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
cost_of_mining_bitcoin = price_per_month/bitcoin_mined_per_month
return cost_of_mining_bitcoin
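# Worked example (illustrative figures only; reward, prices and the network
# hash rate all change constantly): a 13.5 TH/s miner against a network rate
# of 1.35e20 H/s mines 13.5e12 / 1.35e20 = 1e-7 of each block. Over the
# 6 * 24 * 30 = 4320 blocks of a month and a 6.25 BTC reward that is roughly
# 2.7e-3 BTC, and costOfMiningBitcoin() divides the month's electricity bill
# by that quantity to price one mined bitcoin.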
if __name__ == "__main__":
balance = getBalance(myaddress) / btcval
print("Current Bitcoin balance = %.8f at Address = %s" % (balance, myaddress))
value = getCurrentValueOfBitcoinInAddressInInr(myaddress)
print("Current Value of Bitcoin = %.2f for Address = %s" % (value, myaddress))
current_block_height = getCurrentBlockHeight()
print("current block height = %d" % (current_block_height))
tx_count_in_last_block = getTxCountInBlock(current_block_height)
print("Number of transactions in last block = %d" % (tx_count_in_last_block))
tx_rate = getTxRate(tx_count_in_last_block)
print("Current transaction rate = %.6f" % (tx_rate))
# average_tx_rate = getAverageTxRateInLast24Hrs()
# print("Average Transaction Rate in last 24 Hrs = %.6f" % (average_tx_rate))
current_network_hash_rate = getCurrentNetworkHashRate()
print("Current Network Hash Rate = %d" % (current_network_hash_rate))
block_reward = getCurrentBlockReward()
print("Current Block Reward = %.8f" % (block_reward))
current_buy_rate_in_inr = getCurrentBuyPriceInInr()
current_buy_rate_in_rupees = convertToRupeeFormat(current_buy_rate_in_inr)
print("Current Buy Rate in Indian Rupees = %s" % (current_buy_rate_in_rupees))
miner_hashrate = 13.5 * 10**12
print("Miner hashrate = %d" % (miner_hashrate))
miner_power = 1323
print ("Miner Power in Watt = %f" % (miner_power))
expense = getMiningPowerExpense(miner_power)
print ("Miner Power Expense Per Month = %.2f" % (expense))
bitcoin_mined_per_month = getBitcoinMinedPerMonth(miner_hashrate)
print("Bitcoin Mined Per Month = %.8f from Miner with hashrate = %d" % (bitcoin_mined_per_month, miner_hashrate))
mining_return = miningReturn(miner_power, miner_hashrate)
print("Mining Return Per Month = %s" % (mining_return))
cost_of_mining_bitcoin = costOfMiningBitcoin(miner_power, miner_hashrate)
print("Cost of Mining Bitcoin = %.2f" % (cost_of_mining_bitcoin))
unconfirmed_transaction_count = getUnconfirmedTransactionCount()
print("Total Unconfirmed Transaction Count = %d" % (unconfirmed_transaction_count))
txn_history = getTxnHistoryOfAddress(myaddress)
    txn_history_table = pd.DataFrame(txn_history)
"""
October 2020
Updated: August 2021
Software version: Python 3.7
This code calculates the building material demand and embodied greenhouse gas emissions in 26 global regions between 2020 and 2060. For the original code & latest updates, see: https://github.com/oucxiaoyang/GloBUME
The building material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
@author: <NAME>; <EMAIL>
<NAME>; <EMAIL>
<NAME>; <EMAIL>
contributions from: <NAME>
*NOTE: Insert location of the GloBUME-main folder in 'dir_path' (line 28) to run the code.
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
res_building_types = 4 #4 residential building types: detached, semi-detached, appartments & high-rise
area = 2 #2 areas: rural & urban
materials = 7 #7 materials: Steel, brick, Concrete, Wood, Copper, Aluminium, Glass
inflation = 1.2423 # gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean == 2:
file_addition = '_high'
elif flag_Mean == 3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population/Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area/Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials = pd.read_csv('files_material_density/Building_materials' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial = pd.read_csv('files_material_density/materials_commercial' + file_addition + '.csv') # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for commercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
# pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index = "t", columns = "Region", values = "Rural")
floorspace_urb = floorspace.pivot(index = "t", columns = "Region", values = "Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gomperz curve (fitted, using separate regression model)
# Select Gompertz curve parameters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
# find the total commercial m2 stock (in Millions of m2)
commercial_m2_cap = pd.DataFrame(index = range(1971,2061), columns = range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
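# Illustrative sketch (not used above, where the curve is written out inline): the
# Gompertz saturation curve applied to the service value added (SVA, in $/cap) has
# the form m2/cap = a * exp(-b * exp(-(c/1000) * SVA)); it relies on the math import above.
def _example_gompertz_m2_cap(sva_per_cap, a, b, c):
    return a * math.exp(-b * math.exp((-c / 1000.0) * sva_per_cap))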
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
        # calculate minimum values for later use in the historic tail (Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
        # Then use the ratios to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial -----------------------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock/hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the last 10 years of IMAGE data
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1 - (rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1 - (sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1 - (sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1 - (sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1 - (sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1 - (sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1 - (sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = rurpop.columns)
pop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_govern.columns)
# Find minimum or maximum values in the original IMAGE data (just for residential; commercial minimum values have been calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100 + rurpop_trend_by_region[region - 1])/100)**(1970 - year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
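# Illustrative sketch (not used by the model, which writes the formula out inline
# above): each historic series is extrapolated backwards from its first IMAGE value
# at a constant annual trend, bounded by a floor (floorspace & commercial m2/cap);
# the rural population share uses min() with a cap instead.
def _example_backcast(value_start, trend_pct_per_yr, years_back, floor = 0.0):
    return max(floor, value_start * ((100.0 - trend_pct_per_yr) / 100.0) ** years_back)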
# To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = urbpop.columns)
pop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
# MAX(0,...) Because of floating point deviations, leading to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
# combine historic with IMAGE data here
rurpop_tail = rurpop_1820_1970.append(rurpop2, ignore_index = False)
urbpop_tail = urbpop_1820_1970.append(urbpop, ignore_index = False)
pop_tail = pop_1820_1970.append(pop2, ignore_index = False)
floorspace_urb_tail = floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False)
floorspace_rur_tail = floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False)
rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index = False), ignore_index = False)
urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index = False), ignore_index = False)
pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index = False), ignore_index = False)
floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False), ignore_index = False)
floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False), ignore_index = False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False), ignore_index = False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False), ignore_index = False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False), ignore_index = False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False), ignore_index = False)
#%% FLOOR AREA STOCK -----------------------------------------------------------
# adjust the share for urban/rural only (shares in the csv are given as a percentage of the total (rural + urban); we adjust the urban shares to add up to 1, and likewise for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculate the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index)
people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index)
# calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur)
people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns = people_rur.columns, index = people_rur.index)
people_sem_rur = pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns = people_rur.columns, index = people_rur.index)
people_app_rur = pd.DataFrame(housing_type_rur3.iloc[2].values*people_rur.values, columns = people_rur.columns, index = people_rur.index)
people_hig_rur = pd.DataFrame(housing_type_rur3.iloc[3].values*people_rur.values, columns = people_rur.columns, index = people_rur.index)
people_det_urb = pd.DataFrame(housing_type_urb3.iloc[0].values*people_urb.values, columns = people_urb.columns, index = people_urb.index)
people_sem_urb = pd.DataFrame(housing_type_urb3.iloc[1].values*people_urb.values, columns = people_urb.columns, index = people_urb.index)
people_app_urb = pd.DataFrame(housing_type_urb3.iloc[2].values*people_urb.values, columns = people_urb.columns, index = people_urb.index)
people_hig_urb = pd.DataFrame(housing_type_urb3.iloc[3].values*people_urb.values, columns = people_urb.columns, index = people_urb.index)
# calculate the total m2 (urban/rural) BY HOUSING TYPE (= nr. of people * OWN avg m2, so not based on IMAGE)
m2_unadjusted_det_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[0].values * people_det_rur.values, columns = people_det_rur.columns, index = people_det_rur.index)
m2_unadjusted_sem_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[1].values * people_sem_rur.values, columns = people_sem_rur.columns, index = people_sem_rur.index)
m2_unadjusted_app_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[2].values * people_app_rur.values, columns = people_app_rur.columns, index = people_app_rur.index)
m2_unadjusted_hig_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[3].values * people_hig_rur.values, columns = people_hig_rur.columns, index = people_hig_rur.index)
m2_unadjusted_det_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[0].values * people_det_urb.values, columns = people_det_urb.columns, index = people_det_urb.index)
m2_unadjusted_sem_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[1].values * people_sem_urb.values, columns = people_sem_urb.columns, index = people_sem_urb.index)
m2_unadjusted_app_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[2].values * people_app_urb.values, columns = people_app_urb.columns, index = people_app_urb.index)
m2_unadjusted_hig_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[3].values * people_hig_urb.values, columns = people_hig_urb.columns, index = people_hig_urb.index)
# Define empty dataframes for m2 adjustments
total_m2_adj_rur = pd.DataFrame(index = m2_unadjusted_det_rur.index, columns = m2_unadjusted_det_rur.columns)
total_m2_adj_urb = pd.DataFrame(index = m2_unadjusted_det_urb.index, columns = m2_unadjusted_det_urb.columns)
# Sum all square meters in Rural area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_rur.loc[j,str(i)] = m2_unadjusted_det_rur.loc[j,str(i)] + m2_unadjusted_sem_rur.loc[j,str(i)] + m2_unadjusted_app_rur.loc[j,str(i)] + m2_unadjusted_hig_rur.loc[j,str(i)]
# Sum all square meters in Urban area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_urb.loc[j,str(i)] = m2_unadjusted_det_urb.loc[j,str(i)] + m2_unadjusted_sem_urb.loc[j,str(i)] + m2_unadjusted_app_urb.loc[j,str(i)] + m2_unadjusted_hig_urb.loc[j,str(i)]
# average square meter per person implied by our OWN data
avg_m2_cap_adj_rur = pd.DataFrame(total_m2_adj_rur.values / people_rur.values, columns = people_rur.columns, index = people_rur.index)
avg_m2_cap_adj_urb = pd.DataFrame(total_m2_adj_urb.values / people_urb.values, columns = people_urb.columns, index = people_urb.index)
# factor to correct square meters per capita so that we respect the IMAGE data in terms of total m2, but we use our own distinction between Building types
m2_cap_adj_fact_rur = pd.DataFrame(floorspace_rur_tail.values / avg_m2_cap_adj_rur.values, columns = floorspace_rur_tail.columns, index = floorspace_rur_tail.index)
m2_cap_adj_fact_urb = pd.DataFrame(floorspace_urb_tail.values / avg_m2_cap_adj_urb.values, columns = floorspace_urb_tail.columns, index = floorspace_urb_tail.index)
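# Illustrative sketch (toy inputs, not model data): the correction factor simply
# rescales our own m2 figures -- implied by the building-type split -- so that the
# regional total floorspace per capita matches the IMAGE figure.
def _example_m2_adjustment(image_m2_cap, own_m2_by_type, people):
    own_m2_cap = sum(own_m2_by_type.values()) / people
    factor = image_m2_cap / own_m2_cap
    return {btype: m2 * factor for btype, m2 in own_m2_by_type.items()}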
# All m2 by region (in millions), Building_type & year (using the correction factor, to comply with IMAGE avg m2/cap)
m2_det_rur = pd.DataFrame(m2_unadjusted_det_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index)
m2_sem_rur = pd.DataFrame(m2_unadjusted_sem_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index)
m2_app_rur = pd.DataFrame(m2_unadjusted_app_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index)
m2_hig_rur = pd.DataFrame(m2_unadjusted_hig_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index)
m2_det_urb = pd.DataFrame(m2_unadjusted_det_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
m2_sem_urb = pd.DataFrame(m2_unadjusted_sem_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
m2_app_urb = pd.DataFrame(m2_unadjusted_app_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
m2_hig_urb = pd.DataFrame(m2_unadjusted_hig_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
# Add a checksum to see if calculations based on adjusted OWN avg m2 (by building type) now match the total m2 according to IMAGE.
m2_sum_rur_OWN = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur
m2_sum_rur_IMAGE = pd.DataFrame(floorspace_rur_tail.values*people_rur.values, columns = m2_sum_rur_OWN.columns, index = m2_sum_rur_OWN.index)
m2_checksum = m2_sum_rur_OWN - m2_sum_rur_IMAGE
if m2_checksum.sum().sum() > 0.0000001 or m2_checksum.sum().sum() < -0.0000001:
ctypes.windll.user32.MessageBoxW(0, "IMAGE & OWN m2 sums do not match", "Warning", 1)
# Total RESIDENTIAL square meters by region
m2 = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur + m2_det_urb + m2_sem_urb + m2_app_urb + m2_hig_urb
# Total m2 for COMMERCIAL Buildings
commercial_m2_office = pd.DataFrame(commercial_m2_cap_office_tail.values * pop_tail.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
commercial_m2_retail = pd.DataFrame(commercial_m2_cap_retail_tail.values * pop_tail.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
commercial_m2_hotels = pd.DataFrame(commercial_m2_cap_hotels_tail.values * pop_tail.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
commercial_m2_govern = pd.DataFrame(commercial_m2_cap_govern_tail.values * pop_tail.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
#%% FLOOR AREA INFLOW & OUTFLOW
import sys
sys.path.append(dir_path)
import dynamic_stock_model
from dynamic_stock_model import DynamicStockModel as DSM
idx = pd.IndexSlice # needed for slicing multi-index
# define a function for calculating the floor area inflow and outflow
def inflow_outflown(shape, scale, stock, length): # length is the number of years in the entire period
out_oc_reg = pd.DataFrame(index = range(1721,2061), columns = pd.MultiIndex.from_product([list(range(1,27)), list(range(1721,2061))])) # Multi-index columns (region & years), to contain a matrix of years*years for each region
out_i_reg = pd.DataFrame(index = range(1721,2061), columns = range(1,27))
out_s_reg = pd.DataFrame(index = range(1721,2061), columns = range(1,27))
out_o_reg = pd.DataFrame(index = range(1721,2061), columns = range(1,27))
for region in range(1,27):
shape_list = shape.loc[region]
scale_list = scale.loc[region]
if flag_Normal == 0:
DSMforward = DSM(t = np.arange(0,length,1), s = np.array(stock[region]), lt = {'Type': 'Weibull', 'Shape': np.array(shape_list), 'Scale': np.array(scale_list)})
else:
DSMforward = DSM(t = np.arange(0,length,1), s = np.array(stock[region]), lt = {'Type': 'FoldNorm', 'Mean': np.array(shape_list), 'StdDev': np.array(scale_list)}) # shape & scale list are actually Mean & StDev here
out_sc, out_oc, out_i = DSMforward.compute_stock_driven_model(NegativeInflowCorrect = True)
out_i_reg[region] = out_i
out_oc[out_oc < 0] = 0 # remove negative outflow, replace by 0
out_oc_reg.loc[:,idx[region,:]] = out_oc
# If you are only interested in the total outflow, you can sum the outflow by cohort
out_o_reg[region] = out_oc.sum(axis = 1)
out_o_reg_corr = out_o_reg._get_numeric_data()
out_o_reg_corr[out_o_reg_corr < 0] = 0
out_s_reg[region] = out_sc.sum(axis = 1) #Stock
return out_i_reg, out_oc_reg
length = len(m2_hig_urb[1]) # = 340
#nindex = np.arange(0,26)
#% lifetime parameters (shape & scale)
lifetimes = pd.read_csv(dir_path + '/files_lifetimes/lifetimes.csv')
lifetimes_comm = pd.read_csv(dir_path + '/files_lifetimes/lifetimes_comm.csv')
# separate shape from scale
lifetimes_shape = lifetimes[['Region','Building_type','Area','Shape']]
lifetimes_scale = lifetimes[['Region','Building_type','Area','Scale']]
shape_comm = lifetimes_comm[['Region','Shape']]
scale_comm = lifetimes_comm[['Region','Scale']]
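# Illustrative sketch (the actual cohort decay is handled inside the dynamic stock
# model below): under a Weibull(shape, scale) lifetime the fraction of a building
# cohort still standing after t years is exp(-(t/scale)**shape).
def _example_weibull_survival(t_years, shape, scale):
    return np.exp(-(np.asarray(t_years, dtype = float) / scale) ** shape)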
# generate time-series data structure
for i in range(1721,2061):
lifetimes_shape[i] = lifetimes_shape['Shape']
lifetimes_scale[i] = lifetimes_scale['Scale']
shape_comm[i] = shape_comm['Shape']
scale_comm[i] = scale_comm['Scale']
# *NOTE: here we have created these multi-dimensional structures, in which the region, building type, and year are specified, so that scenario analyses (e.g. of a lifetime extension) can easily be done in either Python or Excel.
# For example, one can create a well-structured 'lifetimes_scale.csv' with customised scale-parameter changes and load that .csv file instead; the sketch below shows the equivalent adjustment directly in Python.
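# Illustrative sketch (hypothetical scenario, not applied in this run): a lifetime
# extension can be expressed by scaling the Weibull 'Scale' year-columns from a
# chosen year onwards, e.g. +20% from 2020.
def _example_extend_lifetimes(scale_table, factor = 1.2, from_year = 2020):
    extended = scale_table.copy()
    year_cols = [c for c in extended.columns if isinstance(c, int) and c >= from_year]
    extended[year_cols] = extended[year_cols] * factor
    return extended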
# parameters by building type
lifetimes_shape = lifetimes_shape.drop(['Shape'],axis = 1)
lifetimes_scale = lifetimes_scale.drop(['Scale'],axis = 1)
shape_comm = shape_comm.drop(['Shape'],axis = 1).set_index('Region')
scale_comm = scale_comm.drop(['Scale'],axis = 1).set_index('Region')
shape_det_rur = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Rural') & (lifetimes_shape['Building_type'] == 'Detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_sem_rur = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Rural') & (lifetimes_shape['Building_type'] == 'Semi-detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_app_rur = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Rural') & (lifetimes_shape['Building_type'] == 'Appartments')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_hig_rur = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Rural') & (lifetimes_shape['Building_type'] == 'High-rise')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_det_urb = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Urban') & (lifetimes_shape['Building_type'] == 'Detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_sem_urb = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Urban') & (lifetimes_shape['Building_type'] == 'Semi-detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_app_urb = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Urban') & (lifetimes_shape['Building_type'] == 'Appartments')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
shape_hig_urb = lifetimes_shape.loc[(lifetimes_shape['Area'] == 'Urban') & (lifetimes_shape['Building_type'] == 'High-rise')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_det_rur = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Rural') & (lifetimes_scale['Building_type'] == 'Detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_sem_rur = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Rural') & (lifetimes_scale['Building_type'] == 'Semi-detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_app_rur = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Rural') & (lifetimes_scale['Building_type'] == 'Appartments')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_hig_rur = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Rural') & (lifetimes_scale['Building_type'] == 'High-rise')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_det_urb = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Urban') & (lifetimes_scale['Building_type'] == 'Detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_sem_urb = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Urban') & (lifetimes_scale['Building_type'] == 'Semi-detached')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_app_urb = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Urban') & (lifetimes_scale['Building_type'] == 'Appartments')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
scale_hig_urb = lifetimes_scale.loc[(lifetimes_scale['Area'] == 'Urban') & (lifetimes_scale['Building_type'] == 'High-rise')].set_index('Region').drop(['Building_type', 'Area'],axis = 1)
# call the defined model to calculate inflow & outflow based on stock & lifetime
m2_det_rur_i, m2_det_rur_oc = inflow_outflown(shape_det_rur, scale_det_rur, m2_det_rur, length)
m2_sem_rur_i, m2_sem_rur_oc = inflow_outflown(shape_sem_rur, scale_sem_rur, m2_sem_rur, length)
m2_app_rur_i, m2_app_rur_oc = inflow_outflown(shape_app_rur, scale_app_rur, m2_app_rur, length)
m2_hig_rur_i, m2_hig_rur_oc = inflow_outflown(shape_hig_rur, scale_hig_rur, m2_hig_rur, length)
m2_det_urb_i, m2_det_urb_oc = inflow_outflown(shape_det_urb, scale_det_urb, m2_det_urb, length)
m2_sem_urb_i, m2_sem_urb_oc = inflow_outflown(shape_sem_urb, scale_sem_urb, m2_sem_urb, length)
m2_app_urb_i, m2_app_urb_oc = inflow_outflown(shape_app_urb, scale_app_urb, m2_app_urb, length)
m2_hig_urb_i, m2_hig_urb_oc = inflow_outflown(shape_hig_urb, scale_hig_urb, m2_hig_urb, length)
m2_office_i, m2_office_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_office, length)
m2_retail_i, m2_retail_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_retail, length)
m2_hotels_i, m2_hotels_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_hotels, length)
m2_govern_i, m2_govern_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_govern, length)
# total MILLIONS of square meters inflow
m2_res_i = m2_det_rur_i + m2_sem_rur_i + m2_app_rur_i + m2_hig_rur_i + m2_det_urb_i + m2_sem_urb_i + m2_app_urb_i + m2_hig_urb_i
m2_comm_i = m2_office_i + m2_retail_i + m2_hotels_i + m2_govern_i
#%% MATERIAL INTENSITY RESTRUCTURING (to become consistent with floor area dataset)-----------------------------------------------------------
# separate different materials
building_materials_steel = building_materials[['Region','Building_type','steel']]
building_materials_brick_rural = building_materials[['Region','Building_type','brick_rural']]
building_materials_brick_urban = building_materials[['Region','Building_type','brick_urban']]
building_materials_concrete = building_materials[['Region','Building_type','concrete']]
building_materials_wood = building_materials[['Region','Building_type','wood']]
building_materials_copper = building_materials[['Region','Building_type','copper']]
building_materials_aluminium = building_materials[['Region','Building_type','aluminium']]
building_materials_glass = building_materials[['Region','Building_type','glass']]
materials_commercial_steel = materials_commercial[['Region','Building_type','steel']]
materials_commercial_brick = materials_commercial[['Region','Building_type','brick']]
materials_commercial_concrete = materials_commercial[['Region','Building_type','concrete']]
materials_commercial_wood = materials_commercial[['Region','Building_type','wood']]
materials_commercial_copper = materials_commercial[['Region','Building_type','copper']]
materials_commercial_aluminium = materials_commercial[['Region','Building_type','aluminium']]
materials_commercial_glass = materials_commercial[['Region','Building_type','glass']]
# generate time-series data structure
for i in range(1721,2061):
building_materials_steel[i] = building_materials_steel['steel']
building_materials_concrete[i] = building_materials_concrete['concrete']
building_materials_brick_rural[i] = building_materials_brick_rural['brick_rural']
building_materials_brick_urban[i] = building_materials_brick_urban['brick_urban']
building_materials_wood[i] = building_materials_wood['wood']
building_materials_copper[i] = building_materials_copper['copper']
building_materials_aluminium[i] = building_materials_aluminium['aluminium']
building_materials_glass[i] = building_materials_glass['glass']
materials_commercial_steel[i] = materials_commercial_steel['steel']
materials_commercial_brick[i] = materials_commercial_brick['brick']
materials_commercial_concrete[i] = materials_commercial_concrete['concrete']
materials_commercial_wood[i] = materials_commercial_wood['wood']
materials_commercial_copper[i] = materials_commercial_copper['copper']
materials_commercial_aluminium[i] = materials_commercial_aluminium['aluminium']
materials_commercial_glass[i] = materials_commercial_glass['glass']
building_materials_steel = building_materials_steel.drop(['steel'],axis = 1)
building_materials_concrete = building_materials_concrete.drop(['concrete'],axis = 1)
building_materials_brick_rural = building_materials_brick_rural.drop(['brick_rural'],axis = 1)
building_materials_brick_urban = building_materials_brick_urban.drop(['brick_urban'],axis = 1)
building_materials_wood = building_materials_wood.drop(['wood'],axis = 1)
building_materials_copper = building_materials_copper.drop(['copper'],axis = 1)
building_materials_aluminium = building_materials_aluminium.drop(['aluminium'],axis = 1)
building_materials_glass = building_materials_glass.drop(['glass'],axis = 1)
materials_commercial_steel = materials_commercial_steel.drop(['steel'],axis = 1)
materials_commercial_brick = materials_commercial_brick.drop(['brick'],axis = 1)
materials_commercial_concrete = materials_commercial_concrete.drop(['concrete'],axis = 1)
materials_commercial_wood = materials_commercial_wood.drop(['wood'],axis = 1)
materials_commercial_copper = materials_commercial_copper.drop(['copper'],axis = 1)
materials_commercial_aluminium = materials_commercial_aluminium.drop(['aluminium'],axis = 1)
materials_commercial_glass = materials_commercial_glass.drop(['glass'],axis = 1)
# *NOTE: here we have created these multi-dimensional structures, in which the region, building type, and year are specified, so that scenario analyses (e.g. of light-weighting or material substitution) can easily be done in either Python or Excel.
# For example, one can create a well-structured 'building_materials_steel.csv' with customised material-intensity changes and load that .csv file instead; the sketch below shows the equivalent adjustment directly in Python.
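# Illustrative sketch (hypothetical scenario, not applied in this run): light-weighting
# or material substitution can be expressed by scaling a material-intensity table's
# year-columns from a chosen year onwards, e.g. 10% less steel from 2025.
def _example_lightweight_intensity(intensity_table, factor = 0.9, from_year = 2025):
    adjusted = intensity_table.copy()
    year_cols = [c for c in adjusted.columns if isinstance(c, int) and c >= from_year]
    adjusted[year_cols] = adjusted[year_cols] * factor
    return adjusted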
# steel intensity
material_steel_det = building_materials_steel.loc[(building_materials_steel['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_steel_sem = building_materials_steel.loc[(building_materials_steel['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_steel_app = building_materials_steel.loc[(building_materials_steel['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_steel_hig = building_materials_steel.loc[(building_materials_steel['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_steel_office = materials_commercial_steel.loc[(materials_commercial_steel['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_steel_retail = materials_commercial_steel.loc[(materials_commercial_steel['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_steel_hotels = materials_commercial_steel.loc[(materials_commercial_steel['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_steel_govern = materials_commercial_steel.loc[(materials_commercial_steel['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# brick intensity
material_brick_det_rural = building_materials_brick_rural.loc[(building_materials_brick_rural['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_brick_sem_rural = building_materials_brick_rural.loc[(building_materials_brick_rural['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_brick_app_rural = building_materials_brick_rural.loc[(building_materials_brick_rural['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_brick_hig_rural = building_materials_brick_rural.loc[(building_materials_brick_rural['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
material_brick_det_urban = building_materials_brick_urban.loc[(building_materials_brick_urban['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_urb.index)
material_brick_sem_urban = building_materials_brick_urban.loc[(building_materials_brick_urban['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_urb.index)
material_brick_app_urban = building_materials_brick_urban.loc[(building_materials_brick_urban['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_urb.index)
material_brick_hig_urban = building_materials_brick_urban.loc[(building_materials_brick_urban['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_urb.index)
materials_brick_office = materials_commercial_brick.loc[(materials_commercial_brick['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_brick_retail = materials_commercial_brick.loc[(materials_commercial_brick['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_brick_hotels = materials_commercial_brick.loc[(materials_commercial_brick['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_brick_govern = materials_commercial_brick.loc[(materials_commercial_brick['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# concrete intensity
material_concrete_det = building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_concrete_sem = building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_concrete_app = building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_concrete_hig = building_materials_concrete.loc[(building_materials_concrete['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_concrete_office = materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_concrete_retail = materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_concrete_hotels = materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_concrete_govern = materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# wood intensity
material_wood_det = building_materials_wood.loc[(building_materials_wood['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_wood_sem = building_materials_wood.loc[(building_materials_wood['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_wood_app = building_materials_wood.loc[(building_materials_wood['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_wood_hig = building_materials_wood.loc[(building_materials_wood['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_wood_office = materials_commercial_wood.loc[(materials_commercial_wood['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_wood_retail = materials_commercial_wood.loc[(materials_commercial_wood['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_wood_hotels = materials_commercial_wood.loc[(materials_commercial_wood['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_wood_govern = materials_commercial_wood.loc[(materials_commercial_wood['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# copper intensity
material_copper_det = building_materials_copper.loc[(building_materials_copper['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_copper_sem = building_materials_copper.loc[(building_materials_copper['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_copper_app = building_materials_copper.loc[(building_materials_copper['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_copper_hig = building_materials_copper.loc[(building_materials_copper['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_copper_office = materials_commercial_copper.loc[(materials_commercial_copper['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_copper_retail = materials_commercial_copper.loc[(materials_commercial_copper['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_copper_hotels = materials_commercial_copper.loc[(materials_commercial_copper['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_copper_govern = materials_commercial_copper.loc[(materials_commercial_copper['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# aluminium intensity
material_aluminium_det = building_materials_aluminium.loc[(building_materials_aluminium['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_aluminium_sem = building_materials_aluminium.loc[(building_materials_aluminium['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_aluminium_app = building_materials_aluminium.loc[(building_materials_aluminium['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_aluminium_hig = building_materials_aluminium.loc[(building_materials_aluminium['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_aluminium_office = materials_commercial_aluminium.loc[(materials_commercial_aluminium['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_aluminium_retail = materials_commercial_aluminium.loc[(materials_commercial_aluminium['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_aluminium_hotels = materials_commercial_aluminium.loc[(materials_commercial_aluminium['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_aluminium_govern = materials_commercial_aluminium.loc[(materials_commercial_aluminium['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
# glass intensity
material_glass_det = building_materials_glass.loc[(building_materials_glass['Building_type']=='Detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_det_rur.index)
material_glass_sem = building_materials_glass.loc[(building_materials_glass['Building_type']=='Semi-detached')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_sem_rur.index)
material_glass_app = building_materials_glass.loc[(building_materials_glass['Building_type']=='Appartments')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_app_rur.index)
material_glass_hig = building_materials_glass.loc[(building_materials_glass['Building_type']=='High-rise')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(m2_hig_rur.index)
materials_glass_office = materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Offices')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_office.index)
materials_glass_retail = materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Retail+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_retail.index)
materials_glass_hotels = materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Hotels+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_hotels.index)
materials_glass_govern = materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Govt+')].set_index('Region').drop(['Building_type'],axis = 1).T.set_index(commercial_m2_govern.index)
#%% Material inflow & outflow
#% Material inflow (Millions of kgs = *1000 tons)
# steel
kg_det_rur_steel_i = m2_det_rur_i * material_steel_det
kg_sem_rur_steel_i = m2_sem_rur_i * material_steel_sem
kg_app_rur_steel_i = m2_app_rur_i * material_steel_app
kg_hig_rur_steel_i = m2_hig_rur_i * material_steel_hig
kg_det_urb_steel_i = m2_det_urb_i * material_steel_det
kg_sem_urb_steel_i = m2_sem_urb_i * material_steel_sem
kg_app_urb_steel_i = m2_app_urb_i * material_steel_app
kg_hig_urb_steel_i = m2_hig_urb_i * material_steel_hig
kg_office_steel_i = m2_office_i * materials_steel_office
kg_retail_steel_i = m2_retail_i * materials_steel_retail
kg_hotels_steel_i = m2_hotels_i * materials_steel_hotels
kg_govern_steel_i = m2_govern_i * materials_steel_govern
# brick
kg_det_rur_brick_i = m2_det_rur_i * material_brick_det_rural
kg_sem_rur_brick_i = m2_sem_rur_i * material_brick_sem_rural
kg_app_rur_brick_i = m2_app_rur_i * material_brick_app_rural
kg_hig_rur_brick_i = m2_hig_rur_i * material_brick_hig_rural
kg_det_urb_brick_i = m2_det_urb_i * material_brick_det_urban
kg_sem_urb_brick_i = m2_sem_urb_i * material_brick_sem_urban
kg_app_urb_brick_i = m2_app_urb_i * material_brick_app_urban
kg_hig_urb_brick_i = m2_hig_urb_i * material_brick_hig_urban
kg_office_brick_i = m2_office_i * materials_brick_office
kg_retail_brick_i = m2_retail_i * materials_brick_retail
kg_hotels_brick_i = m2_hotels_i * materials_brick_hotels
kg_govern_brick_i = m2_govern_i * materials_brick_govern
# concrete
kg_det_rur_concrete_i = m2_det_rur_i * material_concrete_det
kg_sem_rur_concrete_i = m2_sem_rur_i * material_concrete_sem
kg_app_rur_concrete_i = m2_app_rur_i * material_concrete_app
kg_hig_rur_concrete_i = m2_hig_rur_i * material_concrete_hig
kg_det_urb_concrete_i = m2_det_urb_i * material_concrete_det
kg_sem_urb_concrete_i = m2_sem_urb_i * material_concrete_sem
kg_app_urb_concrete_i = m2_app_urb_i * material_concrete_app
kg_hig_urb_concrete_i = m2_hig_urb_i * material_concrete_hig
kg_office_concrete_i = m2_office_i * materials_concrete_office
kg_retail_concrete_i = m2_retail_i * materials_concrete_retail
kg_hotels_concrete_i = m2_hotels_i * materials_concrete_hotels
kg_govern_concrete_i = m2_govern_i * materials_concrete_govern
# wood
kg_det_rur_wood_i = m2_det_rur_i * material_wood_det
kg_sem_rur_wood_i = m2_sem_rur_i * material_wood_sem
kg_app_rur_wood_i = m2_app_rur_i * material_wood_app
kg_hig_rur_wood_i = m2_hig_rur_i * material_wood_hig
kg_det_urb_wood_i = m2_det_urb_i * material_wood_det
kg_sem_urb_wood_i = m2_sem_urb_i * material_wood_sem
kg_app_urb_wood_i = m2_app_urb_i * material_wood_app
kg_hig_urb_wood_i = m2_hig_urb_i * material_wood_hig
kg_office_wood_i = m2_office_i * materials_wood_office
kg_retail_wood_i = m2_retail_i * materials_wood_retail
kg_hotels_wood_i = m2_hotels_i * materials_wood_hotels
kg_govern_wood_i = m2_govern_i * materials_wood_govern
# copper
kg_det_rur_copper_i = m2_det_rur_i * material_copper_det
kg_sem_rur_copper_i = m2_sem_rur_i * material_copper_sem
kg_app_rur_copper_i = m2_app_rur_i * material_copper_app
kg_hig_rur_copper_i = m2_hig_rur_i * material_copper_hig
kg_det_urb_copper_i = m2_det_urb_i * material_copper_det
kg_sem_urb_copper_i = m2_sem_urb_i * material_copper_sem
kg_app_urb_copper_i = m2_app_urb_i * material_copper_app
kg_hig_urb_copper_i = m2_hig_urb_i * material_copper_hig
kg_office_copper_i = m2_office_i * materials_copper_office
kg_retail_copper_i = m2_retail_i * materials_copper_retail
kg_hotels_copper_i = m2_hotels_i * materials_copper_hotels
kg_govern_copper_i = m2_govern_i * materials_copper_govern
# aluminium
kg_det_rur_aluminium_i = m2_det_rur_i * material_aluminium_det
kg_sem_rur_aluminium_i = m2_sem_rur_i * material_aluminium_sem
kg_app_rur_aluminium_i = m2_app_rur_i * material_aluminium_app
kg_hig_rur_aluminium_i = m2_hig_rur_i * material_aluminium_hig
kg_det_urb_aluminium_i = m2_det_urb_i * material_aluminium_det
kg_sem_urb_aluminium_i = m2_sem_urb_i * material_aluminium_sem
kg_app_urb_aluminium_i = m2_app_urb_i * material_aluminium_app
kg_hig_urb_aluminium_i = m2_hig_urb_i * material_aluminium_hig
kg_office_aluminium_i = m2_office_i * materials_aluminium_office
kg_retail_aluminium_i = m2_retail_i * materials_aluminium_retail
kg_hotels_aluminium_i = m2_hotels_i * materials_aluminium_hotels
kg_govern_aluminium_i = m2_govern_i * materials_aluminium_govern
# glass
kg_det_rur_glass_i = m2_det_rur_i * material_glass_det
kg_sem_rur_glass_i = m2_sem_rur_i * material_glass_sem
kg_app_rur_glass_i = m2_app_rur_i * material_glass_app
kg_hig_rur_glass_i = m2_hig_rur_i * material_glass_hig
kg_det_urb_glass_i = m2_det_urb_i * material_glass_det
kg_sem_urb_glass_i = m2_sem_urb_i * material_glass_sem
kg_app_urb_glass_i = m2_app_urb_i * material_glass_app
kg_hig_urb_glass_i = m2_hig_urb_i * material_glass_hig
kg_office_glass_i = m2_office_i * materials_glass_office
kg_retail_glass_i = m2_retail_i * materials_glass_retail
kg_hotels_glass_i = m2_hotels_i * materials_glass_hotels
kg_govern_glass_i = m2_govern_i * materials_glass_govern
#% Material outflow (Millions of kgs = *1000 tons)
# first define a function for calculating the material outflow by cohort
def material_outflow(m2_outflow_cohort,material_density):
emp = []
for i in range(0,26):
md = material_density.iloc[:,i]
m2 = m2_outflow_cohort.loc[:,(i+1,1721):(i+1,2060)]
m2.columns = md.index
material_outflow_cohort = m2*md
material_outflow_cohort_sum = material_outflow_cohort.sum(1)
emp.append(material_outflow_cohort_sum)
result = pd.DataFrame(emp)
result.index = range(1, 27)
return result.T
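# Sketch of the data layout assumed by material_outflow (descriptive comment, not executed):
#  - m2_outflow_cohort: MultiIndex columns of (region 1..26, construction cohort 1721..2060),
#    rows are simulation years; values are m2 demolished from each cohort.
#  - material_density: rows are construction-cohort years (aligned with the cohort columns),
#    columns are the 26 regions; values are material intensity in kg/m2.
# For region r the loop multiplies each demolished-cohort column by that cohort's intensity
# (material_density.iloc[:, r-1]) and sums over cohorts, yielding one kg time series per
# region; the transposed result therefore has the regions 1..26 as columns.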
# steel outflow
kg_det_rur_steel_o = material_outflow(m2_det_rur_oc, material_steel_det)
kg_sem_rur_steel_o = material_outflow(m2_sem_rur_oc, material_steel_sem)
kg_app_rur_steel_o = material_outflow(m2_app_rur_oc, material_steel_app)
kg_hig_rur_steel_o = material_outflow(m2_hig_rur_oc, material_steel_hig)
kg_det_urb_steel_o = material_outflow(m2_det_urb_oc, material_steel_det)
kg_sem_urb_steel_o = material_outflow(m2_sem_urb_oc, material_steel_sem)
kg_app_urb_steel_o = material_outflow(m2_app_urb_oc, material_steel_app)
kg_hig_urb_steel_o = material_outflow(m2_hig_urb_oc, material_steel_hig)
kg_office_steel_o = material_outflow(m2_office_oc, materials_steel_office)
kg_retail_steel_o = material_outflow(m2_retail_oc, materials_steel_retail)
kg_hotels_steel_o = material_outflow(m2_hotels_oc, materials_steel_hotels)
kg_govern_steel_o = material_outflow(m2_govern_oc, materials_steel_govern)
# brick outflow
kg_det_rur_brick_o = material_outflow(m2_det_rur_oc, material_brick_det_rural)
kg_sem_rur_brick_o = material_outflow(m2_sem_rur_oc, material_brick_sem_rural)
kg_app_rur_brick_o = material_outflow(m2_app_rur_oc, material_brick_app_rural)
kg_hig_rur_brick_o = material_outflow(m2_hig_rur_oc, material_brick_hig_rural)
kg_det_urb_brick_o = material_outflow(m2_det_urb_oc, material_brick_det_urban)
kg_sem_urb_brick_o = material_outflow(m2_sem_urb_oc, material_brick_sem_urban)
kg_app_urb_brick_o = material_outflow(m2_app_urb_oc, material_brick_app_urban)
kg_hig_urb_brick_o = material_outflow(m2_hig_urb_oc, material_brick_hig_urban)
kg_office_brick_o = material_outflow(m2_office_oc, materials_brick_office)
kg_retail_brick_o = material_outflow(m2_retail_oc, materials_brick_retail)
kg_hotels_brick_o = material_outflow(m2_hotels_oc, materials_brick_hotels)
kg_govern_brick_o = material_outflow(m2_govern_oc, materials_brick_govern)
# concrete outflow
kg_det_rur_concrete_o = material_outflow(m2_det_rur_oc,material_concrete_det)
kg_sem_rur_concrete_o = material_outflow(m2_sem_rur_oc, material_concrete_sem)
kg_app_rur_concrete_o = material_outflow(m2_app_rur_oc, material_concrete_app)
kg_hig_rur_concrete_o = material_outflow(m2_hig_rur_oc, material_concrete_hig)
kg_det_urb_concrete_o = material_outflow(m2_det_urb_oc, material_concrete_det)
kg_sem_urb_concrete_o = material_outflow(m2_sem_urb_oc, material_concrete_sem)
kg_app_urb_concrete_o = material_outflow(m2_app_urb_oc, material_concrete_app)
kg_hig_urb_concrete_o = material_outflow(m2_hig_urb_oc, material_concrete_hig)
kg_office_concrete_o = material_outflow(m2_office_oc, materials_concrete_office)
kg_retail_concrete_o = material_outflow(m2_retail_oc, materials_concrete_retail)
kg_hotels_concrete_o = material_outflow(m2_hotels_oc, materials_concrete_hotels)
kg_govern_concrete_o = material_outflow(m2_govern_oc, materials_concrete_govern)
# wood outflow
kg_det_rur_wood_o = material_outflow(m2_det_rur_oc, material_wood_det)
kg_sem_rur_wood_o = material_outflow(m2_sem_rur_oc, material_wood_sem)
kg_app_rur_wood_o = material_outflow(m2_app_rur_oc, material_wood_app)
kg_hig_rur_wood_o = material_outflow(m2_hig_rur_oc, material_wood_hig)
kg_det_urb_wood_o = material_outflow(m2_det_urb_oc, material_wood_det)
kg_sem_urb_wood_o = material_outflow(m2_sem_urb_oc, material_wood_sem)
kg_app_urb_wood_o = material_outflow(m2_app_urb_oc, material_wood_app)
kg_hig_urb_wood_o = material_outflow(m2_hig_urb_oc, material_wood_hig)
kg_office_wood_o = material_outflow(m2_office_oc, materials_wood_office)
kg_retail_wood_o = material_outflow(m2_retail_oc, materials_wood_retail)
kg_hotels_wood_o = material_outflow(m2_hotels_oc, materials_wood_hotels)
kg_govern_wood_o = material_outflow(m2_govern_oc, materials_wood_govern)
# copper outflow
kg_det_rur_copper_o = material_outflow(m2_det_rur_oc, material_copper_det)
kg_sem_rur_copper_o = material_outflow(m2_sem_rur_oc, material_copper_sem)
kg_app_rur_copper_o = material_outflow(m2_app_rur_oc, material_copper_app)
kg_hig_rur_copper_o = material_outflow(m2_hig_rur_oc, material_copper_hig)
kg_det_urb_copper_o = material_outflow(m2_det_urb_oc, material_copper_det)
kg_sem_urb_copper_o = material_outflow(m2_sem_urb_oc, material_copper_sem)
kg_app_urb_copper_o = material_outflow(m2_app_urb_oc, material_copper_app)
kg_hig_urb_copper_o = material_outflow(m2_hig_urb_oc, material_copper_hig)
kg_office_copper_o = material_outflow(m2_office_oc, materials_copper_office)
kg_retail_copper_o = material_outflow(m2_retail_oc, materials_copper_retail)
kg_hotels_copper_o = material_outflow(m2_hotels_oc, materials_copper_hotels)
kg_govern_copper_o = material_outflow(m2_govern_oc, materials_copper_govern)
# aluminium outflow
kg_det_rur_aluminium_o = material_outflow(m2_det_rur_oc, material_aluminium_det)
kg_sem_rur_aluminium_o = material_outflow(m2_sem_rur_oc, material_aluminium_sem)
kg_app_rur_aluminium_o = material_outflow(m2_app_rur_oc, material_aluminium_app)
kg_hig_rur_aluminium_o = material_outflow(m2_hig_rur_oc, material_aluminium_hig)
kg_det_urb_aluminium_o = material_outflow(m2_det_urb_oc, material_aluminium_det)
kg_sem_urb_aluminium_o = material_outflow(m2_sem_urb_oc, material_aluminium_sem)
kg_app_urb_aluminium_o = material_outflow(m2_app_urb_oc, material_aluminium_app)
kg_hig_urb_aluminium_o = material_outflow(m2_hig_urb_oc, material_aluminium_hig)
kg_office_aluminium_o = material_outflow(m2_office_oc, materials_aluminium_office)
kg_retail_aluminium_o = material_outflow(m2_retail_oc, materials_aluminium_retail)
kg_hotels_aluminium_o = material_outflow(m2_hotels_oc, materials_aluminium_hotels)
kg_govern_aluminium_o = material_outflow(m2_govern_oc, materials_aluminium_govern)
# glass outflow
kg_det_rur_glass_o = material_outflow(m2_det_rur_oc, material_glass_det)
kg_sem_rur_glass_o = material_outflow(m2_sem_rur_oc, material_glass_sem)
kg_app_rur_glass_o = material_outflow(m2_app_rur_oc, material_glass_app)
kg_hig_rur_glass_o = material_outflow(m2_hig_rur_oc, material_glass_hig)
kg_det_urb_glass_o = material_outflow(m2_det_urb_oc, material_glass_det)
kg_sem_urb_glass_o = material_outflow(m2_sem_urb_oc, material_glass_sem)
kg_app_urb_glass_o = material_outflow(m2_app_urb_oc, material_glass_app)
kg_hig_urb_glass_o = material_outflow(m2_hig_urb_oc, material_glass_hig)
kg_office_glass_o = material_outflow(m2_office_oc, materials_glass_office)
kg_retail_glass_o = material_outflow(m2_retail_oc, materials_glass_retail)
kg_hotels_glass_o = material_outflow(m2_hotels_oc, materials_glass_hotels)
kg_govern_glass_o = material_outflow(m2_govern_oc, materials_glass_govern)
#%% CSV output (material inflow & outflow)
# first, define a function to transpose + combine all variables & add columns to identify material, area & building type. Only for CSV output
length = 2
tag = ['inflow', 'outflow']
def preprocess(inflow, outflow, area, building, material):
    output_combined = [None] * length
output_combined[0] = inflow.transpose()
output_combined[1] = outflow.transpose()
for item in range(0,length):
output_combined[item].insert(0,'material', [material] * 26)
output_combined[item].insert(0,'area', [area] * 26)
output_combined[item].insert(0,'type', [building] * 26)
output_combined[item].insert(0,'flow', [tag[item]] * 26)
return output_combined
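# Sketch of the preprocess output (descriptive comment): each call returns a two-element
# list [inflow_df, outflow_df]; both frames are transposed to 26 region rows and gain four
# identifier columns ('flow', 'type', 'area', 'material'), so that every building/material
# combination can later be stacked into a single long-format table for the CSV export.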
# steel output
kg_det_rur_steel_out = preprocess(kg_det_rur_steel_i, kg_det_rur_steel_o, 'rural','detached', 'steel')
kg_sem_rur_steel_out = preprocess(kg_sem_rur_steel_i, kg_sem_rur_steel_o, 'rural','semi-detached', 'steel')
kg_app_rur_steel_out = preprocess(kg_app_rur_steel_i, kg_app_rur_steel_o, 'rural','appartments', 'steel')
kg_hig_rur_steel_out = preprocess(kg_hig_rur_steel_i, kg_hig_rur_steel_o, 'rural','high-rise', 'steel')
kg_det_urb_steel_out = preprocess(kg_det_urb_steel_i, kg_det_urb_steel_o, 'urban','detached', 'steel')
kg_sem_urb_steel_out = preprocess(kg_sem_urb_steel_i, kg_sem_urb_steel_o, 'urban','semi-detached', 'steel')
kg_app_urb_steel_out = preprocess(kg_app_urb_steel_i, kg_app_urb_steel_o, 'urban','appartments', 'steel')
kg_hig_urb_steel_out = preprocess(kg_hig_urb_steel_i, kg_hig_urb_steel_o, 'urban','high-rise', 'steel')
kg_office_steel_out = preprocess(kg_office_steel_i, kg_office_steel_o, 'commercial','office', 'steel')
kg_retail_steel_out = preprocess(kg_retail_steel_i, kg_retail_steel_o, 'commercial','retail', 'steel')
kg_hotels_steel_out = preprocess(kg_hotels_steel_i, kg_hotels_steel_o, 'commercial','hotels', 'steel')
kg_govern_steel_out = preprocess(kg_govern_steel_i, kg_govern_steel_o, 'commercial','govern', 'steel')
# brick output
kg_det_rur_brick_out = preprocess(kg_det_rur_brick_i, kg_det_rur_brick_o, 'rural','detached', 'brick')
kg_sem_rur_brick_out = preprocess(kg_sem_rur_brick_i, kg_sem_rur_brick_o, 'rural','semi-detached', 'brick')
kg_app_rur_brick_out = preprocess(kg_app_rur_brick_i, kg_app_rur_brick_o, 'rural','appartments', 'brick')
kg_hig_rur_brick_out = preprocess(kg_hig_rur_brick_i, kg_hig_rur_brick_o, 'rural','high-rise', 'brick')
kg_det_urb_brick_out = preprocess(kg_det_urb_brick_i, kg_det_urb_brick_o, 'urban','detached', 'brick')
kg_sem_urb_brick_out = preprocess(kg_sem_urb_brick_i, kg_sem_urb_brick_o, 'urban','semi-detached', 'brick')
kg_app_urb_brick_out = preprocess(kg_app_urb_brick_i, kg_app_urb_brick_o, 'urban','appartments', 'brick')
kg_hig_urb_brick_out = preprocess(kg_hig_urb_brick_i, kg_hig_urb_brick_o, 'urban','high-rise', 'brick')
kg_office_brick_out = preprocess(kg_office_brick_i, kg_office_brick_o, 'commercial','office', 'brick')
kg_retail_brick_out = preprocess(kg_retail_brick_i, kg_retail_brick_o, 'commercial','retail', 'brick')
kg_hotels_brick_out = preprocess(kg_hotels_brick_i, kg_hotels_brick_o, 'commercial','hotels', 'brick')
kg_govern_brick_out = preprocess(kg_govern_brick_i, kg_govern_brick_o, 'commercial','govern', 'brick')
# concrete output
kg_det_rur_concrete_out = preprocess(kg_det_rur_concrete_i, kg_det_rur_concrete_o, 'rural','detached', 'concrete')
kg_sem_rur_concrete_out = preprocess(kg_sem_rur_concrete_i, kg_sem_rur_concrete_o, 'rural','semi-detached', 'concrete')
kg_app_rur_concrete_out = preprocess(kg_app_rur_concrete_i, kg_app_rur_concrete_o, 'rural','appartments', 'concrete')
kg_hig_rur_concrete_out = preprocess(kg_hig_rur_concrete_i, kg_hig_rur_concrete_o, 'rural','high-rise', 'concrete')
kg_det_urb_concrete_out = preprocess(kg_det_urb_concrete_i, kg_det_urb_concrete_o, 'urban','detached', 'concrete')
kg_sem_urb_concrete_out = preprocess(kg_sem_urb_concrete_i, kg_sem_urb_concrete_o, 'urban','semi-detached', 'concrete')
kg_app_urb_concrete_out = preprocess(kg_app_urb_concrete_i, kg_app_urb_concrete_o, 'urban','appartments', 'concrete')
kg_hig_urb_concrete_out = preprocess(kg_hig_urb_concrete_i, kg_hig_urb_concrete_o, 'urban','high-rise', 'concrete')
kg_office_concrete_out = preprocess(kg_office_concrete_i, kg_office_concrete_o, 'commercial','office', 'concrete')
kg_retail_concrete_out = preprocess(kg_retail_concrete_i, kg_retail_concrete_o, 'commercial','retail', 'concrete')
kg_hotels_concrete_out = preprocess(kg_hotels_concrete_i, kg_hotels_concrete_o, 'commercial','hotels', 'concrete')
kg_govern_concrete_out = preprocess(kg_govern_concrete_i, kg_govern_concrete_o, 'commercial','govern', 'concrete')
# wood output
kg_det_rur_wood_out = preprocess(kg_det_rur_wood_i, kg_det_rur_wood_o, 'rural','detached', 'wood')
kg_sem_rur_wood_out = preprocess(kg_sem_rur_wood_i, kg_sem_rur_wood_o, 'rural','semi-detached', 'wood')
kg_app_rur_wood_out = preprocess(kg_app_rur_wood_i, kg_app_rur_wood_o, 'rural','appartments', 'wood')
kg_hig_rur_wood_out = preprocess(kg_hig_rur_wood_i, kg_hig_rur_wood_o, 'rural','high-rise', 'wood')
kg_det_urb_wood_out = preprocess(kg_det_urb_wood_i, kg_det_urb_wood_o, 'urban','detached', 'wood')
kg_sem_urb_wood_out = preprocess(kg_sem_urb_wood_i, kg_sem_urb_wood_o, 'urban','semi-detached', 'wood')
kg_app_urb_wood_out = preprocess(kg_app_urb_wood_i, kg_app_urb_wood_o, 'urban','appartments', 'wood')
kg_hig_urb_wood_out = preprocess(kg_hig_urb_wood_i, kg_hig_urb_wood_o, 'urban','high-rise', 'wood')
kg_office_wood_out = preprocess(kg_office_wood_i, kg_office_wood_o, 'commercial','office', 'wood')
kg_retail_wood_out = preprocess(kg_retail_wood_i, kg_retail_wood_o, 'commercial','retail', 'wood')
kg_hotels_wood_out = preprocess(kg_hotels_wood_i, kg_hotels_wood_o, 'commercial','hotels', 'wood')
kg_govern_wood_out = preprocess(kg_govern_wood_i, kg_govern_wood_o, 'commercial','govern', 'wood')
# copper output
kg_det_rur_copper_out = preprocess(kg_det_rur_copper_i, kg_det_rur_copper_o, 'rural','detached', 'copper')
kg_sem_rur_copper_out = preprocess(kg_sem_rur_copper_i, kg_sem_rur_copper_o, 'rural','semi-detached', 'copper')
kg_app_rur_copper_out = preprocess(kg_app_rur_copper_i, kg_app_rur_copper_o, 'rural','appartments', 'copper')
kg_hig_rur_copper_out = preprocess(kg_hig_rur_copper_i, kg_hig_rur_copper_o, 'rural','high-rise', 'copper')
kg_det_urb_copper_out = preprocess(kg_det_urb_copper_i, kg_det_urb_copper_o, 'urban','detached', 'copper')
kg_sem_urb_copper_out = preprocess(kg_sem_urb_copper_i, kg_sem_urb_copper_o, 'urban','semi-detached', 'copper')
kg_app_urb_copper_out = preprocess(kg_app_urb_copper_i, kg_app_urb_copper_o, 'urban','appartments', 'copper')
kg_hig_urb_copper_out = preprocess(kg_hig_urb_copper_i, kg_hig_urb_copper_o, 'urban','high-rise', 'copper')
kg_office_copper_out = preprocess(kg_office_copper_i, kg_office_copper_o, 'commercial','office', 'copper')
kg_retail_copper_out = preprocess(kg_retail_copper_i, kg_retail_copper_o, 'commercial','retail', 'copper')
kg_hotels_copper_out = preprocess(kg_hotels_copper_i, kg_hotels_copper_o, 'commercial','hotels', 'copper')
kg_govern_copper_out = preprocess(kg_govern_copper_i, kg_govern_copper_o, 'commercial','govern', 'copper')
# aluminium output
kg_det_rur_aluminium_out = preprocess(kg_det_rur_aluminium_i, kg_det_rur_aluminium_o, 'rural','detached', 'aluminium')
kg_sem_rur_aluminium_out = preprocess(kg_sem_rur_aluminium_i, kg_sem_rur_aluminium_o, 'rural','semi-detached', 'aluminium')
kg_app_rur_aluminium_out = preprocess(kg_app_rur_aluminium_i, kg_app_rur_aluminium_o, 'rural','appartments', 'aluminium')
kg_hig_rur_aluminium_out = preprocess(kg_hig_rur_aluminium_i, kg_hig_rur_aluminium_o, 'rural','high-rise', 'aluminium')
kg_det_urb_aluminium_out = preprocess(kg_det_urb_aluminium_i, kg_det_urb_aluminium_o, 'urban','detached', 'aluminium')
kg_sem_urb_aluminium_out = preprocess(kg_sem_urb_aluminium_i, kg_sem_urb_aluminium_o, 'urban','semi-detached', 'aluminium')
kg_app_urb_aluminium_out = preprocess(kg_app_urb_aluminium_i, kg_app_urb_aluminium_o, 'urban','appartments', 'aluminium')
kg_hig_urb_aluminium_out = preprocess(kg_hig_urb_aluminium_i, kg_hig_urb_aluminium_o, 'urban','high-rise', 'aluminium')
kg_office_aluminium_out = preprocess(kg_office_aluminium_i, kg_office_aluminium_o, 'commercial','office', 'aluminium')
kg_retail_aluminium_out = preprocess(kg_retail_aluminium_i, kg_retail_aluminium_o, 'commercial','retail', 'aluminium')
kg_hotels_aluminium_out = preprocess(kg_hotels_aluminium_i, kg_hotels_aluminium_o, 'commercial','hotels', 'aluminium')
kg_govern_aluminium_out = preprocess(kg_govern_aluminium_i, kg_govern_aluminium_o, 'commercial','govern', 'aluminium')
# glass output
kg_det_rur_glass_out = preprocess(kg_det_rur_glass_i, kg_det_rur_glass_o, 'rural','detached', 'glass')
kg_sem_rur_glass_out = preprocess(kg_sem_rur_glass_i, kg_sem_rur_glass_o, 'rural','semi-detached', 'glass')
kg_app_rur_glass_out = preprocess(kg_app_rur_glass_i, kg_app_rur_glass_o, 'rural','appartments', 'glass')
kg_hig_rur_glass_out = preprocess(kg_hig_rur_glass_i, kg_hig_rur_glass_o, 'rural','high-rise', 'glass')
kg_det_urb_glass_out = preprocess(kg_det_urb_glass_i, kg_det_urb_glass_o, 'urban','detached', 'glass')
kg_sem_urb_glass_out = preprocess(kg_sem_urb_glass_i, kg_sem_urb_glass_o, 'urban','semi-detached', 'glass')
kg_app_urb_glass_out = preprocess(kg_app_urb_glass_i, kg_app_urb_glass_o, 'urban','appartments', 'glass')
kg_hig_urb_glass_out = preprocess(kg_hig_urb_glass_i, kg_hig_urb_glass_o, 'urban','high-rise', 'glass')
kg_office_glass_out = preprocess(kg_office_glass_i, kg_office_glass_o, 'commercial','office', 'glass')
kg_retail_glass_out = preprocess(kg_retail_glass_i, kg_retail_glass_o, 'commercial','retail', 'glass')
kg_hotels_glass_out = preprocess(kg_hotels_glass_i, kg_hotels_glass_o, 'commercial','hotels', 'glass')
kg_govern_glass_out = preprocess(kg_govern_glass_i, kg_govern_glass_o, 'commercial','govern', 'glass')
# stack into 1 dataframe
frames = [kg_det_rur_steel_out[0], kg_det_rur_brick_out[0], kg_det_rur_concrete_out[0], kg_det_rur_wood_out[0], kg_det_rur_copper_out[0], kg_det_rur_aluminium_out[0], kg_det_rur_glass_out[0],
kg_sem_rur_steel_out[0], kg_sem_rur_brick_out[0], kg_sem_rur_concrete_out[0], kg_sem_rur_wood_out[0], kg_sem_rur_copper_out[0], kg_sem_rur_aluminium_out[0], kg_sem_rur_glass_out[0],
kg_app_rur_steel_out[0], kg_app_rur_brick_out[0], kg_app_rur_concrete_out[0], kg_app_rur_wood_out[0], kg_app_rur_copper_out[0], kg_app_rur_aluminium_out[0], kg_app_rur_glass_out[0],
kg_hig_rur_steel_out[0], kg_hig_rur_brick_out[0], kg_hig_rur_concrete_out[0], kg_hig_rur_wood_out[0], kg_hig_rur_copper_out[0], kg_hig_rur_aluminium_out[0], kg_hig_rur_glass_out[0],
kg_det_urb_steel_out[0], kg_det_urb_brick_out[0], kg_det_urb_concrete_out[0], kg_det_urb_wood_out[0], kg_det_urb_copper_out[0], kg_det_urb_aluminium_out[0], kg_det_urb_glass_out[0],
kg_sem_urb_steel_out[0], kg_sem_urb_brick_out[0], kg_sem_urb_concrete_out[0], kg_sem_urb_wood_out[0], kg_sem_urb_copper_out[0], kg_sem_urb_aluminium_out[0], kg_sem_urb_glass_out[0],
kg_app_urb_steel_out[0], kg_app_urb_brick_out[0], kg_app_urb_concrete_out[0], kg_app_urb_wood_out[0], kg_app_urb_copper_out[0], kg_app_urb_aluminium_out[0], kg_app_urb_glass_out[0],
kg_hig_urb_steel_out[0], kg_hig_urb_brick_out[0], kg_hig_urb_concrete_out[0], kg_hig_urb_wood_out[0], kg_hig_urb_copper_out[0], kg_hig_urb_aluminium_out[0], kg_hig_urb_glass_out[0],
kg_office_steel_out[0], kg_office_brick_out[0], kg_office_concrete_out[0], kg_office_wood_out[0], kg_office_copper_out[0], kg_office_aluminium_out[0], kg_office_glass_out[0],
kg_retail_steel_out[0], kg_retail_brick_out[0], kg_retail_concrete_out[0], kg_retail_wood_out[0], kg_retail_copper_out[0], kg_retail_aluminium_out[0], kg_retail_glass_out[0],
kg_hotels_steel_out[0], kg_hotels_brick_out[0], kg_hotels_concrete_out[0], kg_hotels_wood_out[0], kg_hotels_copper_out[0], kg_hotels_aluminium_out[0], kg_hotels_glass_out[0],
kg_govern_steel_out[0], kg_govern_brick_out[0], kg_govern_concrete_out[0], kg_govern_wood_out[0], kg_govern_copper_out[0], kg_govern_aluminium_out[0], kg_govern_glass_out[0],
kg_det_rur_steel_out[1], kg_det_rur_brick_out[1], kg_det_rur_concrete_out[1], kg_det_rur_wood_out[1], kg_det_rur_copper_out[1], kg_det_rur_aluminium_out[1], kg_det_rur_glass_out[1],
kg_sem_rur_steel_out[1], kg_sem_rur_brick_out[1], kg_sem_rur_concrete_out[1], kg_sem_rur_wood_out[1], kg_sem_rur_copper_out[1], kg_sem_rur_aluminium_out[1], kg_sem_rur_glass_out[1],
kg_app_rur_steel_out[1], kg_app_rur_brick_out[1], kg_app_rur_concrete_out[1], kg_app_rur_wood_out[1], kg_app_rur_copper_out[1], kg_app_rur_aluminium_out[1], kg_app_rur_glass_out[1],
kg_hig_rur_steel_out[1], kg_hig_rur_brick_out[1], kg_hig_rur_concrete_out[1], kg_hig_rur_wood_out[1], kg_hig_rur_copper_out[1], kg_hig_rur_aluminium_out[1], kg_hig_rur_glass_out[1],
kg_det_urb_steel_out[1], kg_det_urb_brick_out[1], kg_det_urb_concrete_out[1], kg_det_urb_wood_out[1], kg_det_urb_copper_out[1], kg_det_urb_aluminium_out[1], kg_det_urb_glass_out[1],
kg_sem_urb_steel_out[1], kg_sem_urb_brick_out[1], kg_sem_urb_concrete_out[1], kg_sem_urb_wood_out[1], kg_sem_urb_copper_out[1], kg_sem_urb_aluminium_out[1], kg_sem_urb_glass_out[1],
kg_app_urb_steel_out[1], kg_app_urb_brick_out[1], kg_app_urb_concrete_out[1], kg_app_urb_wood_out[1], kg_app_urb_copper_out[1], kg_app_urb_aluminium_out[1], kg_app_urb_glass_out[1],
kg_hig_urb_steel_out[1], kg_hig_urb_brick_out[1], kg_hig_urb_concrete_out[1], kg_hig_urb_wood_out[1], kg_hig_urb_copper_out[1], kg_hig_urb_aluminium_out[1], kg_hig_urb_glass_out[1],
kg_office_steel_out[1], kg_office_brick_out[1], kg_office_concrete_out[1], kg_office_wood_out[1], kg_office_copper_out[1], kg_office_aluminium_out[1], kg_office_glass_out[1],
kg_retail_steel_out[1], kg_retail_brick_out[1], kg_retail_concrete_out[1], kg_retail_wood_out[1], kg_retail_copper_out[1], kg_retail_aluminium_out[1], kg_retail_glass_out[1],
kg_hotels_steel_out[1], kg_hotels_brick_out[1], kg_hotels_concrete_out[1], kg_hotels_wood_out[1], kg_hotels_copper_out[1], kg_hotels_aluminium_out[1], kg_hotels_glass_out[1],
kg_govern_steel_out[1], kg_govern_brick_out[1], kg_govern_concrete_out[1], kg_govern_wood_out[1], kg_govern_copper_out[1], kg_govern_aluminium_out[1], kg_govern_glass_out[1] ]
material_output = pd.concat(frames)
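# Illustrative next step (the file name is an assumption, not taken from the original script):
# material_output.to_csv('output_material_inflow_outflow.csv')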
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
        expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
        tm.assert_index_equal(result, expected)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
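# Minimal illustrative call (hypothetical values, shown only to document the expected shape):
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp("2015-01-05"),
#       sids=[0, 1],
#       tuples=[(0, 10.0, pd.Timestamp("2015-01-05")), (1, 20.0, pd.Timestamp("2015-01-07"))],
#       end_date=pd.Timestamp("2015-01-09"),
#   )
# returns a frame indexed by (at_date, knowledge_date) with one forward-filled column per sid.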
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that the latest values of multiple estimate columns are loaded
        correctly for a single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the most recently announced
    estimates for multiple columns on a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
                ((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
            ),
        )
import os
import time
import uuid
import yaml
import logging
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from functools import partial
from astropy.time import Time
from .config import Config
from .config import Configuration
from .clusters import find_clusters, filter_clusters_by_length
from .cell import Cell
from .orbit import TestOrbit
from .orbits import Orbits
from .orbits import generateEphemeris
from .orbits import initialOrbitDetermination
from .orbits import differentialCorrection
from .orbits import mergeAndExtendOrbits
from .observatories import getObserverState
from .utils import _initWorker
from .utils import _checkParallel
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
logger = logging.getLogger("thor")
__all__ = [
"rangeAndShift_worker",
"rangeAndShift",
"clusterVelocity",
"clusterVelocity_worker",
"clusterAndLink",
"runTHOROrbit",
"runTHOR",
]
def rangeAndShift_worker(observations, ephemeris, cell_area=10):
assert len(observations["mjd_utc"].unique()) == 1
assert len(ephemeris["mjd_utc"].unique()) == 1
assert observations["mjd_utc"].unique()[0] == ephemeris["mjd_utc"].unique()[0]
observation_time = observations["mjd_utc"].unique()[0]
# Create Cell centered on the sky-plane location of the
# test orbit
cell = Cell(
ephemeris[["RA_deg", "Dec_deg"]].values[0],
observation_time,
area=cell_area,
)
# Grab observations within cell
cell.getObservations(observations)
if len(cell.observations) != 0:
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToObservations(cell.observations)
projected_observations = cell.observations
else:
projected_observations = pd.DataFrame()
return projected_observations
def clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="hotspot_2d",
):
"""
    Clusters THOR projection-plane detections, shifted by the given trial velocity,
    using `~sklearn.cluster.DBSCAN` or the "hotspot_2d" grid algorithm.
Parameters
----------
obs_ids : `~numpy.ndarray' (N)
Observation IDs.
x : `~numpy.ndarray' (N)
Projection space x coordinate in degrees or radians.
y : `~numpy.ndarray' (N)
Projection space y coordinate in degrees or radians.
dt : `~numpy.ndarray' (N)
Change in time from 0th exposure in units of MJD.
vx : `~numpy.ndarray' (N)
Projection space x velocity in units of degrees or radians per day in MJD.
vy : `~numpy.ndarray' (N)
Projection space y velocity in units of degrees or radians per day in MJD.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
min_arc_length : float, optional
Minimum arc length in units of days for a cluster to be accepted.
Returns
-------
list
If clusters are found, will return a list of numpy arrays containing the
observation IDs for each cluster. If no clusters are found, will return np.NaN.
"""
logger.debug(f"cluster: vx={vx} vy={vy} n_obs={len(obs_ids)}")
xx = x - vx * dt
yy = y - vy * dt
X = np.stack((xx, yy), 1)
clusters = find_clusters(X, eps, min_obs, alg=alg)
clusters = filter_clusters_by_length(
clusters, dt, min_obs, min_arc_length,
)
cluster_ids = []
for cluster in clusters:
cluster_ids.append(obs_ids[cluster])
if len(cluster_ids) == 0:
cluster_ids = np.NaN
return cluster_ids
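# Minimal sketch (illustrative only, numpy-only): the heart of clusterVelocity is the
# linear shift x - vx*dt, y - vy*dt. Detections that really move with velocity
# (vx, vy) collapse onto a single point in the shifted frame, which is what makes
# them clusterable with a small eps.
def _demo_velocity_shift():
    dt = np.array([0.0, 1.0, 2.0, 3.0])
    vx_true, vy_true = 0.02, -0.01
    x = 1.0 + vx_true * dt
    y = 2.0 + vy_true * dt
    xx = x - vx_true * dt
    yy = y - vy_true * dt
    # all four detections land on the same shifted coordinate (1.0, 2.0)
    assert np.allclose(xx, 1.0) and np.allclose(yy, 2.0)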
def clusterVelocity_worker(
vx,
vy,
obs_ids=None,
x=None,
y=None,
dt=None,
eps=None,
min_obs=None,
min_arc_length=None,
alg=None
):
"""
Helper function to multiprocess clustering.
"""
cluster_ids = clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
return cluster_ids
def rangeAndShift(
observations,
orbit,
cell_area=10,
backend="PYOORB",
backend_kwargs={},
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate the orbit to all observation times in observations. At each epoch gather a circular region of observations of size cell_area
centered about the location of the orbit on the sky-plane. Transform and project each of the gathered observations into
the frame of motion of the test orbit.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing preprocessed observations.
Should contain the following columns:
obs_id : observation IDs
RA_deg : Right Ascension in degrees.
Dec_deg : Declination in degrees.
RA_sigma_deg : 1-sigma uncertainty for Right Ascension in degrees.
Dec_sigma_deg : 1-sigma uncertainty for Declination in degrees.
observatory_code : MPC observatory code
orbit : `~numpy.ndarray` (6)
Orbit to propagate. If backend is 'THOR', then these orbits must be expressed
as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
cell_area : float, optional
Cell's area in units of square degrees.
[Default = 10]
backend : {'THOR', 'PYOORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
    projected_observations : `~pandas.DataFrame`
Observations dataframe (from cell.observations) with columns containing
projected coordinates.
"""
time_start = time.time()
logger.info("Running range and shift...")
logger.info("Assuming r = {} au".format(orbit.cartesian[0, :3]))
logger.info("Assuming v = {} au per day".format(orbit.cartesian[0, 3:]))
# Build observers dictionary: keys are observatory codes with exposure times (as astropy.time objects)
# as values
observers = {}
for code in observations["observatory_code"].unique():
observers[code] = Time(
observations[observations["observatory_code"].isin([code])]["mjd_utc"].unique(),
format="mjd",
scale="utc"
)
# Propagate test orbit to all times in observations
ephemeris = generateEphemeris(
orbit,
observers,
backend=backend,
backend_kwargs=backend_kwargs,
chunk_size=1,
num_jobs=1,
parallel_backend=parallel_backend
)
if backend == "FINDORB":
observer_states = []
for observatory_code, observation_times in observers.items():
observer_states.append(
getObserverState(
[observatory_code],
observation_times,
frame='ecliptic',
origin='heliocenter',
)
)
observer_states = pd.concat(observer_states)
observer_states.reset_index(
inplace=True,
drop=True
)
ephemeris = ephemeris.join(observer_states[["obs_x", "obs_y", "obs_z", "obs_vx", "obs_vy", "obs_vz"]])
velocity_cols = []
if backend != "PYOORB":
velocity_cols = ["obs_vx", "obs_vy", "obs_vz"]
observations = observations.merge(
ephemeris[["mjd_utc", "observatory_code", "obs_x", "obs_y", "obs_z"] + velocity_cols],
left_on=["mjd_utc", "observatory_code"],
right_on=["mjd_utc", "observatory_code"]
)
# Split the observations into a single dataframe per unique observatory code and observation time
# Basically split the observations into groups of unique exposures
observations_grouped = observations.groupby(by=["observatory_code", "mjd_utc"])
observations_split = [observations_grouped.get_group(g) for g in observations_grouped.groups]
# Do the same for the test orbit's ephemerides
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g) for g in ephemeris_grouped.groups]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
rangeAndShift_worker_ray = ray.remote(rangeAndShift_worker)
rangeAndShift_worker_ray = rangeAndShift_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
p.append(
rangeAndShift_worker_ray.remote(
observations_i,
ephemeris_i,
cell_area=cell_area
)
)
projected_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
projected_dfs = p.starmap(
partial(
rangeAndShift_worker,
cell_area=cell_area
),
zip(
observations_split,
ephemeris_split,
)
)
p.close()
else:
projected_dfs = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
projected_df = rangeAndShift_worker(
observations_i,
ephemeris_i,
cell_area=cell_area
)
projected_dfs.append(projected_df)
projected_observations = pd.concat(projected_dfs)
if len(projected_observations) > 0:
projected_observations.sort_values(by=["mjd_utc", "observatory_code"], inplace=True)
projected_observations.reset_index(inplace=True, drop=True)
else:
projected_observations = pd.DataFrame(
columns=[
'obs_id', 'mjd_utc', 'RA_deg', 'Dec_deg', 'RA_sigma_deg',
'Dec_sigma_deg', 'observatory_code', 'obs_x', 'obs_y', 'obs_z', 'obj_x',
'obj_y', 'obj_z', 'theta_x_deg', 'theta_y_deg'
]
)
time_end = time.time()
logger.info("Found {} observations.".format(len(projected_observations)))
logger.info("Range and shift completed in {:.3f} seconds.".format(time_end - time_start))
return projected_observations
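# Hedged usage sketch: rangeAndShift expects a preprocessed observations DataFrame
# with at least the columns documented above (obs_id, mjd_utc, RA_deg, Dec_deg,
# RA_sigma_deg, Dec_sigma_deg, observatory_code). All values below are made up for
# illustration; the observatory code and epochs are arbitrary.
def _example_observations():
    return pd.DataFrame({
        "obs_id": ["obs0001", "obs0002"],
        "mjd_utc": [59000.1, 59001.1],
        "RA_deg": [150.01, 150.03],
        "Dec_deg": [-10.00, -10.02],
        "RA_sigma_deg": [1e-4, 1e-4],
        "Dec_sigma_deg": [1e-4, 1e-4],
        "observatory_code": ["I11", "I11"],
    })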
def clusterAndLink(
observations,
vx_range=[-0.1, 0.1],
vy_range=[-0.1, 0.1],
vx_bins=100,
vy_bins=100,
vx_values=None,
vy_values=None,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="dbscan",
num_jobs=1,
parallel_backend="mp"
):
"""
Cluster and link correctly projected (after ranging and shifting)
detections.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing post-range and shift observations.
vx_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in x.
Will not be used if vx_values are specified.
[Default = [-0.1, 0.1]]
vy_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in y.
Will not be used if vy_values are specified.
[Default = [-0.1, 0.1]]
vx_bins : int, optional
Length of x-velocity grid between vx_range[0]
and vx_range[-1]. Will not be used if vx_values are
specified.
[Default = 100]
vy_bins: int, optional
Length of y-velocity grid between vy_range[0]
and vy_range[-1]. Will not be used if vy_values are
specified.
[Default = 100]
vx_values : {None, `~numpy.ndarray`}, optional
Values of velocities in x at which to cluster
and link.
[Default = None]
vy_values : {None, `~numpy.ndarray`}, optional
Values of velocities in y at which to cluster
and link.
[Default = None]
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
    min_arc_length : float, optional
        Minimum arc length in units of days for a cluster to be accepted.
        [Default = 1.0]
    alg : str
        Algorithm to use. Can be "dbscan" or "hotspot_2d".
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
clusters : `~pandas.DataFrame`
DataFrame with the cluster ID, the number of observations, and the x and y velocity.
cluster_members : `~pandas.DataFrame`
DataFrame containing the cluster ID and the observation IDs of its members.
Notes
-----
The algorithm chosen can have a big impact on performance and accuracy.
alg="dbscan" uses the DBSCAN algorithm of Ester et. al. It's relatively slow
but works with high accuracy; it is certain to find all clusters with at
least min_obs points that are separated by at most eps.
alg="hotspot_2d" is much faster (perhaps 10-20x faster) than dbscan, but it
may miss some clusters, particularly when points are spaced a distance of 'eps'
apart.
"""
time_start_cluster = time.time()
logger.info("Running velocity space clustering...")
# Extract useful quantities
obs_ids = observations["obs_id"].values
theta_x = observations["theta_x_deg"].values
theta_y = observations["theta_y_deg"].values
mjd = observations["mjd_utc"].values
# Select detections in first exposure
first = np.where(mjd == mjd.min())[0]
mjd0 = mjd[first][0]
dt = mjd - mjd0
if vx_values is None and vx_range is not None:
vx = np.linspace(*vx_range, num=vx_bins)
elif vx_values is None and vx_range is None:
raise ValueError("Both vx_values and vx_range cannot be None.")
else:
vx = vx_values
vx_range = [vx_values[0], vx_values[-1]]
vx_bins = len(vx)
if vy_values is None and vy_range is not None:
vy = np.linspace(*vy_range, num=vy_bins)
elif vy_values is None and vy_range is None:
raise ValueError("Both vy_values and vy_range cannot be None.")
else:
vy = vy_values
vy_range = [vy_values[0], vy_values[-1]]
vy_bins = len(vy)
if vx_values is None and vy_values is None:
vxx, vyy = np.meshgrid(vx, vy)
vxx = vxx.flatten()
vyy = vyy.flatten()
elif vx_values is not None and vy_values is not None:
vxx = vx
vyy = vy
else:
        raise ValueError("vx_values and vy_values must either both be specified or both be None.")
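    # Illustrative note (assumed values): with vx_range=[-0.1, 0.1], vx_bins=3 and the
    # same settings for vy, np.meshgrid followed by flatten yields 9 (vx, vy) pairs,
    # e.g. (-0.1, -0.1), (0.0, -0.1), (0.1, -0.1), (-0.1, 0.0), and so on; one
    # clustering pass is run per pair further below.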
logger.debug("X velocity range: {}".format(vx_range))
if vx_values is not None:
logger.debug("X velocity values: {}".format(vx_bins))
else:
logger.debug("X velocity bins: {}".format(vx_bins))
logger.debug("Y velocity range: {}".format(vy_range))
if vy_values is not None:
logger.debug("Y velocity values: {}".format(vy_bins))
else:
logger.debug("Y velocity bins: {}".format(vy_bins))
if vx_values is not None:
logger.debug("User defined x velocity values: True")
else:
logger.debug("User defined x velocity values: False")
if vy_values is not None:
logger.debug("User defined y velocity values: True")
else:
logger.debug("User defined y velocity values: False")
if vx_values is None and vy_values is None:
logger.debug("Velocity grid size: {}".format(vx_bins * vy_bins))
else:
logger.debug("Velocity grid size: {}".format(vx_bins))
logger.info("Max sample distance: {}".format(eps))
logger.info("Minimum samples: {}".format(min_obs))
possible_clusters = []
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
clusterVelocity_worker_ray = ray.remote(clusterVelocity_worker)
clusterVelocity_worker_ray = clusterVelocity_worker_ray.options(
num_returns=1,
num_cpus=1
)
# Put all arrays (which can be large) in ray's
# local object store ahead of time
obs_ids_oid = ray.put(obs_ids)
theta_x_oid = ray.put(theta_x)
theta_y_oid = ray.put(theta_y)
dt_oid = ray.put(dt)
p = []
for vxi, vyi in zip(vxx, vyy):
p.append(
clusterVelocity_worker_ray.remote(
vxi,
vyi,
obs_ids=obs_ids_oid,
x=theta_x_oid,
y=theta_y_oid,
dt=dt_oid,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
possible_clusters = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker
)
possible_clusters = p.starmap(
partial(
clusterVelocity_worker,
obs_ids=obs_ids,
x=theta_x,
y=theta_y,
dt=dt,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
),
zip(vxx, vyy)
)
p.close()
else:
possible_clusters = []
for vxi, vyi in zip(vxx, vyy):
possible_clusters.append(
clusterVelocity(
obs_ids,
theta_x,
theta_y,
dt,
vxi,
vyi,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
time_end_cluster = time.time()
logger.info("Clustering completed in {:.3f} seconds.".format(time_end_cluster - time_start_cluster))
logger.info("Restructuring clusters...")
time_start_restr = time.time()
    possible_clusters = pd.DataFrame({"clusters": possible_clusters})
import argparse
import numpy as np
import pandas as pd
import os
import pickle
import langdetect as lang
import time
from datetime import datetime
import json
directory = 'data/twitter'
outfile = 'output.csv'
verbose = False
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, default = directory)
parser.add_argument('outfile', type=str, default=outfile)
args = parser.parse_args()
return args
def DF_to_Dict(df, saveAs=''):
dict = {}
try:
dict['date'] = np.array(df['date'].tolist())
dict['retweets'] = np.array(df['retweets'].tolist())
dict['favorites'] = np.array(df['favorites'].tolist())
dict['text'] = np.array(df['text'].tolist())
dict['hashtags'] = np.array(df['hashtags'].tolist())
dict['id'] = np.array(df['id'].tolist())
dict['permalink'] = np.array(df['permalink'].tolist())
except:
dict['id'] = np.array(df['id'].tolist())
dict['date'] = np.array(df['date'].tolist())
dict['text'] = np.array(df['text'].tolist())
dict['upvotes'] = np.array(df['upvotes'].tolist())
if saveAs != '':
pickle.dump(dict, open(saveAs, 'wb'))
return dict
def get_files_of_type(type, directory):
if type[0] != '.': type = '.' + type
return [os.path.join(directory, f) for f in os.listdir(directory) if
os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1] == type]
def json_to_csv(jsonfile, saveAs='output.csv', index=0, opt='w'):
with open(jsonfile) as file:
data = json.load(file)
csv = open(saveAs, opt)
if opt == 'w': csv.write('id;date;text;upvotes')
for submission in data:
d = str(datetime.fromtimestamp(submission['SubmissionTime']))
t = '"' + str(submission['SubmissionTitle']) + '"'
u = str(submission['SubmitUpvotes'])
csv.write('\n' + ";".join([str(index), d, t, u]))
index += 1
for comment in submission['Comments']:
d = str(datetime.fromtimestamp(comment['CommentTime']))
t = '"' + str(comment['CommentText']) + '"'
u = str(comment['CommentUpvotes'])
csv.write('\n' + ";".join([str(index), d, t, u]))
index += 1
csv.close()
return index
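# Shape of the input JSON assumed by json_to_csv / json_to_Dict, reconstructed from
# the keys accessed above (field values are illustrative only):
# [
#   {
#     "SubmissionTime": 1514764800,
#     "SubmissionTitle": "Example submission title",
#     "SubmitUpvotes": 42,
#     "Comments": [
#       {"CommentTime": 1514765000, "CommentText": "Example comment", "CommentUpvotes": 3}
#     ]
#   }
# ]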
def json_to_Dict(jsonfile, dict={}, saveAs=''):
with open(jsonfile) as file:
data = json.load(file)
if len(list(dict.keys())) == 0:
dict['id'] = np.array([])
dict['date'] = np.array([])
dict['text'] = np.array([])
dict['upvotes'] = np.array([])
index = len(dict['id'])
newids = []
newdates = []
newtext = []
newupvotes = []
for submission in data:
# newids += [index]
# newdates += [datetime.fromtimestamp(submission['SubmissionTime'])]
# newtext += [submission['SubmissionTitle']]
# newupvotes += [submission['SubmitUpvotes']]
# index += 1
for comment in submission['Comments']:
newids += [int(index)]
newdates += [float(comment['CommentTime'])]
newtext += [comment['CommentText']]
newupvotes += [int(comment['CommentUpvotes'])]
index += 1
if index > 100: break
dict['id'] = np.concatenate((dict['id'], np.array(newids)))
dict['date'] = np.concatenate((dict['date'], np.array(newdates)))
dict['text'] = np.concatenate((dict['text'], np.array(newtext)))
dict['upvotes'] = np.concatenate((dict['upvotes'], np.array(newupvotes)))
if saveAs != '':
pickle.dump(dict, open(saveAs, 'wb'))
return dict
def merge(directory, fileType='csv', saveAs=''):
if verbose: print("Merging files...", flush=True)
files = get_files_of_type(fileType, directory)
if 'csv' in fileType.lower():
split = os.path.split(saveAs)
csvFile = os.path.join(split[0], split[1].split('.')[0]) + '.csv'
df = pd.read_csv(files[0], header=0, sep=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
for i in range(1, len(files)):
add = pd.read_csv(files[i], header=0, delimiter=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
df = pd.concat([df, add])
df.to_csv(csvFile, sep=';', index=False, quoting=2)
if verbose: print("Successfully merged {} files with {} lines.".format(len(files), df.shape[0]), flush=True)
dict = DF_to_Dict(df)
if 'json' in fileType.lower():
split = os.path.split(saveAs)
csvFile = os.path.join(split[0], split[1].split('.')[0]) + '.csv'
index = json_to_csv(files[0], saveAs=csvFile)
dict = json_to_Dict(files[0])
for i in range(1, len(files)):
index = json_to_csv(files[i], saveAs=csvFile, index=index, opt='a')
dict = json_to_Dict(files[i], dict=dict)
if 'dict' in fileType.lower():
dict = pickle.load(open(files[0], 'rb'))
keys = list(dict.keys())
for i in range(1, len(files)):
add = pickle.load(open(files[i], 'rb'))
for key in keys:
dict[key] = np.concatenate((dict[key], add[key]))
if saveAs != '':
pickle.dump(dict, open(saveAs, 'wb'))
return dict
def filter(dict, with_words=[], without_words=[], language = 'en', saveAs='', startFrame=-1, endFrame=-1):
d = dict.copy()
keys = list(d.keys())
if startFrame != -1 and endFrame != -1:
for key in keys:
d[key] = d[key][startFrame:]
text = d['text']
remove_indices = np.zeros(len(text), dtype=bool)
start = len(text)
if verbose: print("Filtering file with {} entries...".format(start), flush=True)
# if verbose: print("Time estimated to filter: {:.0f} minutes.".format(start*.011//60+1), flush=True)
language_filter = []
i = 0
z = 0
startTime = time.time()
for t in text:
try:
language_filter.append(lang.detect(t) != language)
except:
language_filter.append(True)
i += 1
if verbose and (time.time()-startTime)//60 > z:
z += 1
print("{:.2f}% of text filtered after {} minutes. Estimated {:.0f} minutes remaining.".format(i/start*100, z, (start-i)/i * z+1), flush=True)
remove_indices += language_filter
if len(with_words) != 0:
for word in with_words:
remove_indices += [word not in t for t in text]
if len(without_words) != 0:
for word in without_words:
remove_indices += [word in t for t in text]
for key in keys:
d[key] = d[key][~remove_indices]
if saveAs != '':
pickle.dump(d, open(saveAs, 'wb'))
end = len(d[keys[0]])
if verbose: print("Successfully filtered file from {} entries to {}.".format(start,end), flush=True)
return d
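# Hedged usage sketch (file paths and keywords are hypothetical):
# tweets = merge('data/twitter', fileType='csv', saveAs='data/merged.pkl')
# english_btc = filter(tweets, with_words=['bitcoin'], without_words=['giveaway'],
#                      language='en', saveAs='data/filtered.pkl')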
def merge_stocks(directory, saveAs=''):
files = get_files_of_type('csv', directory)
split = os.path.split(saveAs)
csvFile = os.path.join(split[0], split[1].split('.')[0]) + '.csv'
cols = ['time', 'open', 'high', 'low', 'close', 'volume']
df = pd.read_csv(files[0], index_col=0, header=None, sep=',')
df.columns = cols
df['volume'] *= ((df['close'] - df['open']) / 2 + df['open'])
for i in range(1, len(files)):
add = pd.read_csv(files[i], index_col=0, header=None, sep=',')
add.columns = cols
add['volume'] *= ((add['close'] - add['open']) / 2 + add['open'])
df = df.add(add, fill_value=0)
df.to_csv(csvFile, sep=',', index=True,index_label='date', quoting=3)
if saveAs != '':
pickle.dump(df, open(saveAs, 'wb'))
return df
def stock_to_DF(stockCSVFile, saveAs=''):
    df = pd.read_csv(stockCSVFile, header=0, sep=',')
    if saveAs != '':
        pickle.dump(df, open(saveAs, 'wb'))
    return df
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
from io import StringIO
import numpy as np
import pytest
from pandas.errors import EmptyDataError
from pandas import DataFrame, Index
import pandas.util.testing as tm
@pytest.mark.parametrize("skiprows", [list(range(6)), 6])
def test_skip_rows_bug(all_parsers, skiprows):
# see gh-505
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
result = parser.read_csv(
StringIO(text), skiprows=skiprows, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(result, expected)
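# Note: for this data the two parametrized values behave identically; an integer
# skiprows=6 skips the first six lines, while skiprows=list(range(6)) skips the lines
# whose 0-indexed numbers are 0..5, i.e. the same six "#foo" comment lines.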
def test_deep_skip_rows(all_parsers):
# see gh-4382
parser = all_parsers
data = "a,b,c\n" + "\n".join(
[",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)]
)
condensed_data = "a,b,c\n" + "\n".join(
[",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]]
)
result = parser.read_csv(StringIO(data), skiprows=[6, 8])
condensed_result = parser.read_csv(StringIO(condensed_data))
tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
# see gh-9832
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = parser.read_csv(
StringIO(text), skiprows=6, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
dict(skiprows=[1]),
DataFrame(
[[2, "line 21\nline 22", 2], [3, "line 31", 1]],
columns=["id", "text", "num_lines"],
),
),
(
"a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
dict(quotechar="~", skiprows=[2]),
DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]),
),
(
(
"Text,url\n~example\n "
"sentence\n one~,url1\n~"
"example\n sentence\n two~,url2\n~"
"example\n sentence\n three~,url3"
),
dict(quotechar="~", skiprows=[1, 3]),
DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]),
),
],
)
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
# see gh-12775 and gh-10911
parser = all_parsers
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
exp_data = [[2, "line '21' line 22", 2], [3, "line '31' line 32", 1]]
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
result = parser.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,exp_data",
[
(
"""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
[[2, "line '21\n' \r\tline 22", 2], [3, "line '31\n' \r\tline 32", 1]],
),
],
)
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), skiprows=[1])
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"line_terminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR"
)
def test_skiprows_lineterminator(all_parsers, line_terminator):
# see gh-9079
parser = all_parsers
data = "\n".join(
[
"SMOSMANIA ThetaProbe-ML2X ",
"2007/01/01 01:00 0.2140 U M ",
"2007/01/01 02:00 0.2141 M O ",
"2007/01/01 04:00 0.2142 D M ",
]
)
expected = DataFrame(
[
["2007/01/01", "01:00", 0.2140, "U", "M"],
["2007/01/01", "02:00", 0.2141, "M", "O"],
["2007/01/01", "04:00", 0.2142, "D", "M"],
],
columns=["date", "time", "var", "flag", "oflag"],
)
if parser.engine == "python" and line_terminator == "\r":
        pytest.skip("'CR' not respected with the Python parser yet")
data = data.replace("\n", line_terminator)
result = parser.read_csv(
StringIO(data),
skiprows=1,
delim_whitespace=True,
names=["date", "time", "var", "flag", "oflag"],
)
tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
# see gh-14459
parser = all_parsers
data = 'a"\nb"\na\n1'
    expected = DataFrame({"a": [1]})
    result = parser.read_csv(StringIO(data), skiprows=2)
    tm.assert_frame_equal(result, expected)
'''
# Recommendation System
GOAL -
1. Build content-based recommendations from the cast, director, country, rating and genre features.
'''
import numpy as np
import pandas as pd
import re
netflix = pd.read_csv(r"/content/drive/MyDrive/Colab Notebooks/netflix_titles.csv",index_col="show_id")
netflix.head()
netflix.groupby('type').count()
# Drop rows that are missing cast, country or rating values
netflix = netflix.dropna(subset=['cast', 'country', 'rating'])
#Developing Recommendation Engine using cast, director, country, rating and genres
movies_df = netflix[netflix['type'] == 'Movie'].reset_index()
movies_df = movies_df.drop(['show_id', 'type', 'date_added', 'release_year', 'duration', 'description'], axis=1)
movies_df.head()
#TV_Shows
tv_df = netflix[netflix['type'] == 'TV Show'].reset_index()
tv_df = tv_df.drop(['show_id', 'type', 'date_added', 'release_year', 'duration', 'description'], axis=1)
tv_df.head()
#Generating Binary data for Recommendations
actors = []
for i in movies_df['cast']:
actor = re.split(r', \s*', i)
actors.append(actor)
print(actors)
flat_list = []
for sublist in actors:
for item in sublist:
flat_list.append(item)
print(flat_list)
actors_list = sorted(set(flat_list))
print(actors_list)
binary_actors = [[] for _ in range(len(actors_list))]  # one indicator list per unique actor
print(binary_actors)
for i in movies_df['cast']:
k = 0
for j in actors_list:
if j in i:
binary_actors[k].append(1.0)
else:
binary_actors[k].append(0.0)
k+=1
binary_actors = pd.DataFrame(binary_actors)
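# Note on orientation: each inner list built above is one actor's indicator vector
# across all movies, so the resulting DataFrame has one row per actor (ordered as in
# actors_list) and one column per movie; transposing it gives the usual movie-by-actor
# feature matrix for similarity calculations.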
import sys
import sklearn.preprocessing as pp
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count
from measures import *
from scipy import spatial
from math import sqrt
import jsonlines
import operator
import scipy
import pickle
import numpy as np
import pandas as pd
import time
import json
THREADS = 16
def tuple_dict_from_ratings(data):
ratings_dict = dict()
for film_id in list(data.keys()):
for ratings in data[film_id]:
tuple_key = (film_id, ratings["user_id"])
ratings_dict[tuple_key] = int(ratings["user_rating"])
return ratings_dict
def map_aspect_values_to_movies(x):
(film, meta), aspect = x
aspects = dict()
if aspect == "director":
aspects[meta[aspect]] = 1
else:
for g in meta[aspect]:
aspects[g] = 1
return film, meta, aspects
def dict_movie_aspect(paper_films, aspect):
paper_films_aspect_prepended = map(lambda e: (e, aspect), list(paper_films.items()))
aspect_dict = dict()
with ProcessPoolExecutor(max_workers=THREADS) as executor:
results = executor.map(map_aspect_values_to_movies, paper_films_aspect_prepended)
for film, meta, aspects in results:
aspect_dict[film + "_" + meta["title"]] = aspects
return aspect_dict
def map_user_profile_normalized(x):
df, user, movies_aspect_values = x
user_movies = df.loc[:, user]
profile = user_movies.dot(movies_aspect_values)
for name in list(movies_aspect_values.columns):
mav = movies_aspect_values.loc[:, name]
assert len(mav) == len(user_movies)
seen = 0
for i in range(len(mav)):
if mav[i] != 0 and user_movies[i] != 0:
seen += 1
if seen != 0:
profile[name] /= seen
return user, profile.to_dict()
def users_movie_aspect_preferences(movies_aspect_values, movies_watched, users):
df = pd.DataFrame.from_dict(movies_watched, orient='index')
df = df.replace(np.nan, 0)
users_aspects_prefs = dict()
with ProcessPoolExecutor(max_workers=THREADS) as executor:
results = executor.map(map_user_profile_normalized, [(df, user, movies_aspect_values) for user in users])
for user, user_profile in results:
users_aspects_prefs[user] = user_profile
return users_aspects_prefs
def viewed_matrix(ratings_cold_start, all_films):
user_ids = ratings_cold_start["userID"]
item_ids = ratings_cold_start["itemID"]
train_ratings = ratings_cold_start["rating"]
assert len(user_ids) == len(item_ids) == len(train_ratings)
movies_watched = dict()
for uid in all_films.keys():
movies_watched[uid + "_" + all_films[uid]["title"]] = dict()
for i in range(len(item_ids)):
current_user_id = user_ids[i]
current_item_id = item_ids[i]
current_rating = int(train_ratings[i])
try:
movies_watched[current_item_id + "_" + all_films[current_item_id]["title"]][current_user_id] = current_rating
except Exception:
print ('item id missing %s' % current_item_id) ## possibly the movies lacking info such as actors which are discarded
return movies_watched
def filter_unseen_movies(movies_genres, movies_watched):
seen_movie_genres = dict()
for k, v in movies_watched.items():
if movies_watched[k]:
seen_movie_genres[k] = movies_genres[k]
return seen_movie_genres
def user_prefs(movies_watched, movies_aspects, users, aspect_type):
movies_aspects = filter_unseen_movies(movies_aspects, movies_watched)
movies_aspects = pd.DataFrame.from_dict(movies_aspects, dtype='int64', orient='index')
movies_aspects = movies_aspects.replace(np.nan, 0)
return users_movie_aspect_preferences(movies_aspects, movies_watched, users)
def user_sim(users_genres_prefs):
users_genres_prefs = pd.DataFrame.from_dict(users_genres_prefs, orient='index')
user_ids_in_matrix = users_genres_prefs.index.values
users_genres_prefs = users_genres_prefs.T
users_genres_prefs = scipy.sparse.csc_matrix(users_genres_prefs.values)
normalized_matrix_by_column = pp.normalize(users_genres_prefs.tocsc(), norm='l2', axis=0)
cosine_sims = normalized_matrix_by_column.T * normalized_matrix_by_column
sims = dict()
for i in user_ids_in_matrix:
sims[i] = []
cosine_sims = cosine_sims.todok().items()
for ((row,col), sim) in cosine_sims:
if row != col:
sims[user_ids_in_matrix[row]].append((user_ids_in_matrix[col], sim))
return sims
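# Minimal sketch of the identity user_sim relies on: after L2-normalising each user's
# preference vector (stored as a column), the product of the transposed matrix with
# itself gives the pairwise cosine similarities. Tiny illustrative example:
def _demo_cosine_via_normalization():
    prefs = np.array([[1.0, 0.0],
                      [1.0, 1.0]])  # 2 aspects (rows) x 2 users (columns)
    normalized = pp.normalize(prefs, norm='l2', axis=0)
    sims = normalized.T @ normalized  # 2x2 cosine similarity matrix
    expected = prefs[:, 0].dot(prefs[:, 1]) / (
        np.linalg.norm(prefs[:, 0]) * np.linalg.norm(prefs[:, 1]))
    assert np.isclose(sims[0, 1], expected)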
def film_strength(user_id, film_id, films, ratings, all_actors, all_directors, all_genres, all_similarities, testing_users_cold_start_for_user, movies_genres, movies_directors, movies_actors):
nSimUsers = 20 # number of similar users to use
users_actors_prefs = testing_users_cold_start_for_user["actors"]
users_directors_prefs = testing_users_cold_start_for_user["directors"]
users_genres_prefs = testing_users_cold_start_for_user["genres"]
similarities_for_new_user = testing_users_cold_start_for_user["sims"]
simsSorted = sorted(similarities_for_new_user, key = operator.itemgetter(1), reverse = True)
sims = simsSorted[:nSimUsers]
film = films[film_id]
# mu constants for this user
MUR = 0.7
MUG = 0.8
MUA = 0.1
MUD = 0.1
# take an average of each of the the genre's average ratings
nGenres = 0
dGenres = 0
if type(film['genre']) is str:
film['genre'] = [film['genre']]
for genre in film['genre']:
aspect_value = movies_genres.loc[genre].to_dict()
movie_ids_with_aspect_value = [k.split("_")[0] for k,v in aspect_value.items() if v == 1]
# get the average rating for each film of that genre and take an average
nGenre = 0
dGenre = 0
for genrefilm in movie_ids_with_aspect_value:
avg_rat = average_rating(sims, genrefilm, ratings)
if avg_rat:
dGenre += avg_rat
nGenre += 1
if nGenre > 0:
avGenre = dGenre / nGenre
cmbGenre = ((((users_genres_prefs[genre]- 1) / 2)-1) + (MUR*avGenre)) / (1+MUR)
else:
cmbGenre = (((users_genres_prefs[genre]- 1) / 2)-1)
dGenres += cmbGenre
nGenres += 1
if nGenres > 0:
avgGenreRating = dGenres / nGenres
else:
avgGenreRating = 0
# take an average of each of the the actor's average ratings
nActors = 0
dActors = 0
if type(film['actors']) is str:
film['actors'] = [film['actors']]
for actor in film['actors']:
aspect_value = movies_actors.loc[actor].to_dict()
movie_ids_with_aspect_value = [k.split("_")[0] for k,v in aspect_value.items() if v == 1]
# get the average rating for each film with that actor and take an average
nActor = 0
dActor = 0
for actorfilm in movie_ids_with_aspect_value:
avg_rat = average_rating(sims, actorfilm, ratings)
if avg_rat:
dActor += avg_rat
nActor += 1
if nActor > 0:
avActor = dActor / nActor
cmbActor = ((((users_actors_prefs[actor]- 1) / 2)-1) + (MUR*avActor)) / (1+MUR)
else:
cmbActor = (((users_actors_prefs[actor]- 1) / 2)-1)
dActors += cmbActor
nActors += 1
if nActors > 0:
avgActorRating = dActors / nActors
else:
avgActorRating = 0
# take an average of each of the the director's average ratings
nDirectors = 0
dDirectors = 0
if type(film['director']) is str:
film['director'] = [film['director']]
for director in film['director']:
aspect_value = movies_directors.loc[director].to_dict()
movie_ids_with_aspect_value = [k.split("_")[0] for k,v in aspect_value.items() if v == 1]
# get the average rating for each film with that actor and take an average
nDirector = 0
dDirector = 0
for directorfilm in movie_ids_with_aspect_value:
avg_rat = average_rating(sims, directorfilm, ratings)
if avg_rat:
dDirector += avg_rat
nDirector += 1
if nDirector > 0:
avDirector = dDirector / nDirector
cmbDirector = ((((users_directors_prefs[director]- 1) / 2)-1) + (MUR*avDirector)) / (1+MUR)
else:
cmbDirector = (((users_directors_prefs[director]- 1) / 2)-1)
dDirectors += cmbDirector
nDirectors += 1
if nDirectors > 0:
avgDirectorRating = dDirectors / nDirectors
else:
avgDirectorRating = 0
# calculates the item strength
avg_rat = average_rating(sims, film_id, ratings)
if avg_rat is None:
item_strength = ((MUG * avgGenreRating) + (MUA * avgActorRating)+ (MUD * avgDirectorRating)) / (MUG + MUA + MUD)
else:
item_strength = ((MUR * avg_rat) + (MUG * avgGenreRating) + (MUA * avgActorRating)+ (MUD * avgDirectorRating)) / (MUR + MUG + MUA + MUD)
return (((item_strength + 1)*2)+1)
def average_rating(sims, film_id, ratings):
# counts and totals for each type of aspect
nRatings = 0
dRatings = 0
# cycles through each of the similar users
for sim in sims:
user_id = sim[0]
similarity = sim[1]
# if a rating exists by this user on the film
if (film_id, user_id) in ratings.keys():
user_rating = ratings[(film_id, user_id)]
scaled_rating = ((user_rating - 1) / 2)-1
dRatings += scaled_rating * similarity
nRatings += 1
if nRatings == 0:
avg_rat = None
else:
avg_rat = dRatings / nRatings
return avg_rat
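# Note on the scaling used throughout this module: ((r - 1) / 2) - 1 maps the 1-5 star
# scale onto [-1, 1] (1 -> -1, 3 -> 0, 5 -> 1); film_strength undoes it at the end
# with ((s + 1) * 2) + 1.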
if __name__ == "__main__":
start = time.time()
ratings = pickle.load(open("data/NETFLIX/movie_ratings_500_id.pkl","rb"))
films = pickle.load(open("data/NETFLIX/movie_metadata.pkl","rb"))
# create dict indexed by user for the rated movies
user_movie_ratings = dict()
for mid, uratings in ratings.items():
for urating in uratings:
uid = urating['user_id']
if uid not in user_movie_ratings:
user_movie_ratings[uid] = []
user_movie_ratings[uid].append((mid, urating['user_rating']))
train_ratings_dict = dict()
train_ratings_dict["userID"] = []
train_ratings_dict["itemID"] = []
train_ratings_dict["rating"] = []
compressed_test_ratings_dict = dict()
# if user rated >30, use 30 movies for testing and the remaining for training
# if user rated 10<=30, use 10 for testing and the remaining for training
for umv, fratings in user_movie_ratings.items():
if len(fratings) > 30:
            # keep userID/itemID/rating parallel: one entry per held-out training rating
            train_ratings_dict["userID"].extend([umv] * len(fratings[30:]))
            train_ratings_dict["itemID"].extend([m for (m, r) in fratings[30:]])
            train_ratings_dict["rating"].extend([r for (m, r) in fratings[30:]])
compressed_test_ratings_dict[umv] = fratings[:30]
elif len(fratings) <= 30 and len(fratings) > 10:
            train_ratings_dict["userID"].extend([umv] * len(fratings[10:]))
            train_ratings_dict["itemID"].extend([m for (m, r) in fratings[10:]])
            train_ratings_dict["rating"].extend([r for (m, r) in fratings[10:]])
compressed_test_ratings_dict[umv] = fratings[:10]
sample_users = set(train_ratings_dict["userID"])
print ('NR USERS %d' % len(sample_users))
movies_genres = dict_movie_aspect(films, "genre")
    movies_genres = pd.DataFrame.from_dict(movies_genres, dtype='int64', orient='index')
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
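    # 0b00101101 is read from the least-significant bit upward: bits 0, 2, 3 and 5 are
    # set, so rows 0, 2, 3 and 5 are valid while rows 1 and 4 are null (null_count == 2).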
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
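    # NaNs should become nulls only when nan_as_null=True, while Python None
    # values always produce nulls.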
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
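    # Assigning a plain array to a new column of a frame with a non-default
    # index should attach that index to the new column.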
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a numpy array, which gives better
    # type compatibility for this comparison
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
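    # Broadcasting a scalar into a new column should preserve the scalar's
    # dtype and match the length of the existing columns.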
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
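    # transpose() and the .T property should match pandas for numeric columns
    # with no/some/all nulls.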
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
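    # Quantiles of datetime, timedelta and float columns should match pandas,
    # both per-column and at the DataFrame level.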
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name can vary between runs, which sometimes
    # makes enc_with_name_arr and enc_arr identical, and there is no better
    # way to force hash to return the same value. Use an integer name so the
    # hash is constant.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
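    # CuPy arrays (which expose __cuda_array_interface__) should be usable
    # directly as Series data and as DataFrame columns.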
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
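    # Series.digitize should agree with np.digitize for both array and
    # Series bins.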
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Typecast the pandas Series because a list of None would otherwise
    # produce `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
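    # set_index should accept column labels, Index/MultiIndex objects and
    # lists mixing them, with drop/append/inplace behaving like pandas.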
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
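    # sort_index should match pandas across axes, sort order, NaN placement,
    # ignore_index handling, and in-place vs. copying behaviour.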
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index in sort_index from v1.0, so it is
    # applied to `expected` manually via reset_index below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
assert np.array_equal(gdf[c].nullmask.to_array(), result[c].nullmask.to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
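# empty inputs default to float64; an explicit dtype (float32/str/object) overrides that default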
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
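# .values on a cudf Series is a CuPy array; .get() below copies it to host for the NumPy comparison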
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
( | pd.Index(["red", "white", "yellow"]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Unit tests for the elephant.pandas_bridge module.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
The levels in `levels` converted to the values pandas would give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
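# Illustrative sketch of the helper above (example values chosen here, not taken
# from the original tests): scalars become single-element pandas Index objects,
# datetimes become a DatetimeIndex, and None becomes an empty Index, e.g.
#   _convert_levels([6.5, 'test', None])
#   -> [Index([6.5]), Index(['test']), Index([])]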
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
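# target values: spike times rescaled to seconds and shaped as a single column;
# the index is simply the spike number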
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
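# target: event labels form the single data column, and the event times
# (rescaled to seconds) form the index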
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
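# epoch_to_dataframe is expected to return a (durations, times) MultiIndex with
# sorted levels, hence the reordering and sort of the targets above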
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
| assert_index_equal(value, level) | pandas.util.testing.assert_index_equal |
# Utilities for dealing with survey data
import pandas as pd
# The below mapping works only with Corona dataset. Adjust them to your own need.
PHQ2_MAP = {
'Little interest or pleasure in doing things.' : 'PHQ2_1',
'Feeling down; depressed or hopeless.' : 'PHQ2_2',
}
PSQI_MAP = {
'Currently; is your sleep typically interrupted? (For example; for attending to a child or due to loud neighbours or medical reasons.)' : 'PSQI_1',
'During the past month; how often have you taken medicine (prescribed or “over the counter”) to help you sleep?' : 'PSQI_2',
'During the past month; how often have you had trouble staying awake while driving; eating meals; or engaging in social activity?' : 'PSQI_3',
'During the past month; how much of a problem has it been for you to keep up enthusiasm to get things done?' : 'PSQI_4',
'During the past month; how would you rate your sleep quality overall?' : 'PSQI_5',
'When have you usually gone to bed? (hh:mm)' : 'PSQI_6',
'What time have you usually gotten up in the morning? (hh:mm)' : 'PSQI_7',
'How long (in minutes) has it taken you to fall asleep each night?' : 'PSQI_8',
'How many hours of actual sleep did you get at night?' : 'PSQI_9',
}
PSS10_MAP = {
'In the last month; how often have you been upset because of something that happened unexpectedly?' : 'PSS10_1',
'In the last month; how often have you felt that you were unable to control the important things in your life?' : 'PSS10_2',
'In the last month; how often have you felt nervous and “stressed”?' : 'PSS10_3',
'In the last month; how often have you felt confident about your ability to handle your personal problems?' : 'PSS10_4',
'In the last month; how often have you felt that things were going your way?' : 'PSS10_5',
'In the last month; how often have you been able to control irritations in your life?' : 'PSS10_6',
'In the last month; how often have you felt that you were on top of things?' : 'PSS10_7',
'In the last month; how often have you been angered because of things that were outside of your control?' : 'PSS10_8',
'In the last month; how often have you felt difficulties were piling up so high that you could not overcome them?' : 'PSS10_9',
}
PANAS_MAP = {
'Upset': 'pre_upset',
'Hostile': 'pre_hostile',
'Alert': 'pre_alert',
'Ashamed': 'pre_ashamed',
'Inspired': 'pre_inspired',
'Nervous': 'pre_nervous',
'Determined': 'pre_determined',
'Attentive': 'pre_attentive',
'Afraid': 'pre_afraid',
'Active': 'pre_active',
'Upset.1': 'during_upset',
'Hostile.1': 'during_hostile',
'Alert.1': 'during_alert',
'Ashamed.1': 'during_ashamed',
'Inspired.1': 'during_inspired',
'Nervous.1': 'during_nervous',
'Determined.1': 'during_determined',
'Attentive.1': 'during_attentive',
'Afraid.1': 'during_afraid',
'Active.1': 'during_active'
}
GAD2_MAP = {
'Feeling nervous; anxious or on edge.': 'GAD2_1',
'Not being able to stop or control worrying.': 'GAD2_2'
}
PSS_ANSWER_MAP = {
'never': 0,
'almost-never': 1,
'sometimes': 2,
'fairly-often': 3,
'very-often': 4
}
PHQ2_ANSWER_MAP = {
'not-at-all': 0,
'several-days': 1,
'more-than-half-the-days': 2,
'nearly-every-day': 3
}
# use this mapping for prefix option, so that multiple question id's can be processed
# simultaneously
ID_MAP_PREFIX = {'PSS' : PSS_ANSWER_MAP,
'PHQ2' : PHQ2_ANSWER_MAP,
'GAD2' : PHQ2_ANSWER_MAP}
# use this mapping if you want to explicitly specify the mapping for each question
ID_MAP = {'PSS10_1' : PSS_ANSWER_MAP,
'PSS10_2' : PSS_ANSWER_MAP,
'PSS10_3' : PSS_ANSWER_MAP,
'PSS10_4' : PSS_ANSWER_MAP,
'PSS10_5' : PSS_ANSWER_MAP,
'PSS10_6' : PSS_ANSWER_MAP,
'PSS10_7' : PSS_ANSWER_MAP,
'PSS10_8' : PSS_ANSWER_MAP,
'PSS10_9' : PSS_ANSWER_MAP}
def survey_convert_to_numerical_answer(df, answer_col, question_id, id_map, use_prefix=False):
"""Convert text answers into numerical value (assuming a long dataframe).
Use answer mapping dictionaries provided by the uses to convert the answers.
Can convert multiple questions having same prefix (e.g., PSS10_1, PSS10_2, ...,PSS10_9)
at same time if prefix mapping is provided. Function returns original values for the
answers that have not been specified for conversion.
Parameters
----------
df : pandas dataframe
Dataframe containing the questions
answer_col : str
Name of the column containing the answers
question_id : str
Name of the column containing the question id.
id_map : dictionary
Dictionary containing answer mappings (value) for each question_id (key),
or a dictionary containing a map for each question id prefix if use_prefix
option is used.
use_prefix : boolean
If False, uses given map (id_map) to convert questions. The default is False.
If True, use question id prefix map, so that multiple question_id's having
the same prefix may be converted on the same time.
Returns
-------
result : pandas series
Series containing converted values, and original values for answers that are not
supposed to be converted.
"""
assert isinstance(df, pd.DataFrame), "df is not a pandas dataframe."
assert isinstance(answer_col, str), "answer_col is not a string."
assert isinstance(question_id, str), "question_id is not a string."
assert isinstance(id_map, dict), "id_map is not a dictionary."
assert isinstance(use_prefix, bool), "use_prefix is not a bool."
# copy original answers
result = df[answer_col]
for key,value in id_map.items():
if use_prefix == True:
temp = df[df[question_id].str.startswith(key)][answer_col]
else:
temp = df[df[question_id] == key][answer_col]
temp = temp.replace(value)
result.loc[temp.index] = temp[:]
del temp
return result
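# A minimal usage sketch (hypothetical data; real answers come from the survey export).
# With use_prefix=True the prefix maps defined above convert every matching question:
#   df = pd.DataFrame({'id': ['PSS10_1', 'PSS10_2', 'PHQ2_1'],
#                      'answer': ['never', 'sometimes', 'not-at-all']})
#   df['answer'] = survey_convert_to_numerical_answer(
#       df, answer_col='answer', question_id='id',
#       id_map=ID_MAP_PREFIX, use_prefix=True)
#   # -> 0, 2, 0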
def survey_print_statistic(df, question_id = 'id', answer_col = 'answer', prefix=None, group=None):
'''
Return survey statistics. The statistics include min, max, average and s.d. values.
:param df:
DataFrame containing the survey scores.
:param question_id: string.
Column containing the question id.
:param answer_col:
Column containing the answers as numerical values.
:param prefix: list.
List of survey prefixes. If None is given, question_id is searched for all possible categories.
:param group: string, optional.
Column to additionally group the statistics by.
Return: dict
A dictionary containing a summary of each questionnaire category.
Example: {'PHQ9': {'min': 3, 'max': 8, 'avg': 4.5, 'std': 2}}
'''
def calculate_statistic(df, prefix, answer_col, group=None):
d = {}
if group:
assert isinstance(group, str),"group is not given in string format"
# Groupby, aggregate and extract statistic from answer column
agg_df = df.groupby(['user', group]) \
.agg({'answer': sum}) \
.groupby(group) \
.agg({'answer': ['mean', 'min', 'max','std']})
agg_df.columns = agg_df.columns.get_level_values(1) #flatten columns
agg_df = agg_df.rename(columns={'': group}).reset_index() # reassign group column
lst = []
for index, row in agg_df.iterrows():
temp = {'min': row['min'], 'max': row['max'],
'avg': row['mean'], 'std': row['std']}
d[(prefix,row[group])] = temp
else:
agg_df = df.groupby('user').agg({answer_col: sum})
d[prefix] = {'min': agg_df[answer_col].min(), 'max': agg_df[answer_col].max(),
'avg': agg_df[answer_col].mean(), 'std': agg_df[answer_col].std()}
return d
res = {}
# Collect questions with the given prefix. Otherwise, collect all prefix, assuming that
# the question id follows this format: {prefix}_id.
if prefix:
if isinstance(prefix, str):
temp = df[df[question_id].str.startswith(prefix)]
return calculate_statistic(temp, prefix, answer_col, group)
elif isinstance(prefix, list):
for pr in prefix:
temp = df[df[question_id].str.startswith(pr)]
d = calculate_statistic(temp, pr, answer_col, group)
res.update(d)
else:
raise ValueError('prefix should be either list or string')
else:
# Search for all possible prefixes (extract everything before the '_' delimiter)
# Then compute statistic as usual
prefix_lst = list(set(df[question_id].str.split('_').str[0]))
for pr in prefix_lst:
temp = df[df[question_id].str.startswith(pr)]
d = calculate_statistic(temp, pr, answer_col, group)
res.update(d)
return res
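# Usage sketch (assumes the answers were already converted to numbers and the dataframe
# has a 'user' column, which calculate_statistic groups on):
#   stats = survey_print_statistic(df, question_id='id', answer_col='answer', prefix='PSS10')
#   # -> {'PSS10': {'min': ..., 'max': ..., 'avg': ..., 'std': ...}}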
def survey_sum_scores(df, survey_prefix, answer_column='answer', id_column='id'):
"""Sum all columns (like ``PHQ9_*``) to get a survey score.
Input dataframe: has a DateTime index, an answer_column with numeric
scores, and an id_column with question IDs like "PHQ9_1", "PHQ9_2",
etc. The given survey_prefix is the "PHQ9" (no underscore) part
which selects the right questions (rows not matching this prefix
won't be included).
This assumes that all surveys have a different time.
survey: The survey prefix in the 'id' column, e.g. 'PHQ9'. An '_' is appended.
"""
if survey_prefix is not None:
answers = df[df[id_column].str.startswith(survey_prefix+'_')]
else:
answers = df
answers[answer_column] = pd.to_numeric(answers[answer_column])
# Group by both user and index. I make this groupby_columns to be
# able to select both the index and the user, when you don't know
# what name the index might have.
groupby_columns = [ ]
if 'user' in answers.columns:
groupby_columns.append(answers['user'])
groupby_columns.append(answers.index)
#
survey_score = answers.groupby(groupby_columns)[answer_column].apply(lambda x: x.sum(skipna=False))
survey_score = survey_score.to_frame()
survey_score = survey_score.rename({'answer': 'score'}, axis='columns')
return survey_score
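# Usage sketch (the input needs a DateTime index, an id column with entries such as
# 'PHQ9_1', and a numeric-convertible answer column; a 'user' column is optional):
#   phq9_scores = survey_sum_scores(df, survey_prefix='PHQ9')
#   # -> one 'score' per (user and) timestamp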
# Move to analysis layer
def daily_affect_variability(questions, subject=None):
""" Returns two DataFrames corresponding to the daily affect variability and
mean daily affect, both measures defined in the OLO paper available in
10.1371/journal.pone.0110907. In brief, the mean daily affect computes the
mean of each of the 7 questions (e.g. sad, cheerful, tired) asked in a
Likert scale from 0 to 7. Conversely, the daily affect variability computes
the standard deviation of each of the 7 questions.
NOTE: This function aggregates data by day.
Parameters
----------
questions: DataFrame with subject data (or database for backwards compatibility)
subject: string, optional (backwards compatibility only, in the future do filtering before).
Returns
-------
DLA_mean: mean of the daily affect
DLA_std: standard deviation of the daily affect
"""
# TODO: The daily summary (mean/std) seems useful, can we generalize?
# Backwards compatibility if a database was passed
if isinstance(questions, niimpy.database.Data1):
questions = questions.raw(table='AwareHyksConverter', user=subject)
# Maintain backwards compatibility in the case subject was passed and
# questions was *not* a database.
elif isinstance(subject, str):
questions = questions[questions['user'] == subject]
questions=questions[(questions['id']=='olo_1_1') | (questions['id']=='olo_1_2') | (questions['id']=='olo_1_3') | (questions['id']=='olo_1_4') | (questions['id']=='olo_1_5') | (questions['id']=='olo_1_6') | (questions['id']=='olo_1_7') | (questions['id']=='olo_1_8')]
questions['answer']= | pd.to_numeric(questions['answer']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 10:35:24 2019
@author: Elkoumy
"""
import sys
import pandas as pd
import time
from datetime import datetime
def to_list(s):
return list(s)
def parse_time(x):
res=0
try:
res=int(time.mktime(datetime.strptime(x,"%Y-%m-%d %H:%M:%S.%f%z").timetuple()))
except:
res=int(time.mktime(datetime.strptime(x,"%Y-%m-%d %H:%M:%S%z").timetuple()))
return res
#input_file = sys.argv[1]
#output_file = sys.argv[2]
input_file=r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Github\SecureMPCBPM\data_and_preprocessing\BPI_Challenge_2017_3_columns.csv"
output_dir=r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Github\SecureMPCBPM\data_and_preprocessing"
data = pd.read_csv(input_file)
new_case_ids = pd.Index(data['case'].unique())
data['case'] = data['case'].apply(lambda x: new_case_ids.get_loc(x))
#df['event'] = pd.util.hash_pandas_object(df['event'],index=False)
""" generating relative time"""
#data['completeTime'] = data['completeTime'].apply(lambda x: int(time.mktime(datetime.strptime(x,"%Y-%m-%d %H:%M:%S.%f%z").timetuple())))
data['completeTime'] = data['completeTime'].apply(parse_time)
data.completeTime=data.completeTime-min(data.completeTime)
""" Generating binary representation of the events """
#moving event to the last column
data=data[['case','completeTime','event']]
unique_events = list(data.event.unique())
#
ini_binary = "0"*(len(unique_events)-1)+"1"
event_idx= {}
for event in unique_events:
event_idx[event]= ini_binary
ini_binary= ini_binary[1:]+"0"
bits_column_names=["b"+str(i) for i in range(0,len(unique_events))]
data.event=data.event.apply(lambda x: event_idx[x])
temp= data.event.apply(to_list)
temp = pd.DataFrame(temp.tolist(), index=temp.index)  # one column per bit position
data[bits_column_names]=temp
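# Illustration of the encoding above (hypothetical event names, 4 unique events):
#   unique_events = ['A', 'B', 'C', 'D']
#   event_idx = {'A': '0001', 'B': '0010', 'C': '0100', 'D': '1000'}
# Each event gets a single set bit (the initial '0...01' string is rotated one position
# per event), and the bit string is then split into the b0..b{n-1} columns.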
""" splitting the file over partyA and partyB """
party_A=pd.DataFrame()
party_B=pd.DataFrame()
party_A=data[~data.event.isin(['00000000000000001000000000','00000000000000001000000000','00000000000000010000000000'])]
party_B=data[data.event.isin(['00000000000000001000000000','00000000000000001000000000','00000000000000010000000000'])]
""" performing padding """
''' party A '''
counts = party_A.groupby("case").count().event
max_count= counts.max()
need_increase=counts[counts<max_count]
difference=max_count-need_increase
padded_value=[]
for i in difference.index:
temp= difference[i] *[[i,0,0]]
padded_value=padded_value+temp
padded_value=pd.DataFrame.from_records(padded_value)
for i in range(0, len(unique_events)):
padded_value['b'+str(i)]=0
padded_value.columns=party_A.columns
party_A= party_A.append(padded_value , ignore_index=True)
party_A= party_A.sort_values(by=['case', 'completeTime'])
''' party B'''
counts = party_B.groupby("case").count().event
max_count= counts.max()
need_increase=counts[counts<max_count]
difference=max_count-need_increase
padded_value=[]
for i in difference.index:
temp= difference[i] *[[i,0,0]]
padded_value=padded_value+temp
padded_value= | pd.DataFrame.from_records(padded_value) | pandas.DataFrame.from_records |
from datetime import datetime
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_extract_expand_kwarg_wrong_type_raises(any_string_dtype):
# TODO: should this raise TypeError
values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
with pytest.raises(ValueError, match="expand must be True or False"):
values.str.extract(".*(BAD[_]+).*(BAD)", expand=None)
def test_extract_expand_kwarg(any_string_dtype):
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*")
tm.assert_frame_equal(result, expected)
result = s.str.extract(".*(BAD[_]+).*", expand=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
tm.assert_frame_equal(result, expected)
def test_extract_expand_False_mixed_object():
ser = Series(
["aBAD_BAD", np.nan, "BAD_b_BAD", True, datetime.today(), "foo", None, 1, 2.0]
)
# two groups
result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
er = [np.nan, np.nan] # empty row
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
# single group
result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False)
expected = Series(
["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_extract_expand_index_raises():
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(["A1", "A2", "A3", "A4", "B5"])
msg = "only one regex group is supported with Index"
with pytest.raises(ValueError, match=msg):
idx.str.extract("([AB])([123])", expand=False)
def test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype):
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
# no groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=False)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=False)
def test_extract_expand_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False)
expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype)
if index_or_series == Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
def test_extract_expand_capture_groups(any_string_dtype):
s = Series(["A1", "B2", "C3"], dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=False)
expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=False)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=False)
expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two normal groups, one non-capturing group
s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)
result = s.str.extract("([AB])([123])(?:[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one optional group followed by one normal group
s = Series(["A1", "B2", "3"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group followed by one optional group
s = Series(["A1", "B2", "C"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_capture_groups_index(index, any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/6348
# not passing index to the extractor
data = ["A1", "B2", "C"]
if len(index) < len(data):
pytest.skip("Index too short")
index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
result = s.str.extract(r"(\d)", expand=False)
expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
index=index,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_single_series_name_is_preserved(any_string_dtype):
s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype)
result = s.str.extract(r"(?P<sue>[a-z])", expand=False)
expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_extract_expand_True(any_string_dtype):
# Contains tests like those in test_match and some others.
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_mixed_object():
er = [np.nan, np.nan] # empty row
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_single_capture_group_raises(
index_or_series, any_string_dtype
):
# these should work for both Series and Index
# no groups
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=True)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=True)
def test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True)
expected_dtype = "object" if index_or_series is Index else any_string_dtype
expected = DataFrame({"uno": ["A", "A"]}, dtype=expected_dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("name", [None, "series_name"])
def test_extract_series(name, any_string_dtype):
# extract should give the same result whether or not the series has a name.
s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=True)
expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=True)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=True)
expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
def test_extract_optional_groups(any_string_dtype):
# two normal groups, one non-capturing group
s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)
result = s.str.extract("([AB])([123])(?:[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one optional group followed by one normal group
s = Series(["A1", "B2", "3"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group followed by one optional group
s = Series(["A1", "B2", "C"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_dataframe_capture_groups_index(index, any_string_dtype):
# GH6348
# not passing index to the extractor
data = ["A1", "B2", "C"]
if len(index) < len(data):
pytest.skip("Index too short")
index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
result = s.str.extract(r"(\d)", expand=True)
expected = DataFrame(["1", "2", np.nan], index=index, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
index=index,
dtype=any_string_dtype,
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import numpy as np
import json
from os.path import join
import pandas as pd
import torch
import logging
import tempfile
import subprocess as sp
from datetime import timedelta
from time import time
from itertools import combinations
from pyrouge import Rouge155
from pyrouge.utils import log
from rouge import Rouge
from fastNLP.core.losses import LossBase
from fastNLP.core.metrics import MetricBase
_ROUGE_PATH = './ROUGE-1.5.5'
class MarginRankingLoss(LossBase):
def __init__(self, margin, score=None, summary_score=None):
super(MarginRankingLoss, self).__init__()
self._init_param_map(score=score, summary_score=summary_score)
self.margin = margin
self.loss_func = torch.nn.MarginRankingLoss(margin)
def get_loss(self, score, summary_score):
# equivalent to initializing TotalLoss to 0
# done this way so that special samples which never enter the loops below still contribute a zero loss
ones = torch.ones(score.size()).cuda(score.device)
loss_func = torch.nn.MarginRankingLoss(0.0)
TotalLoss = loss_func(score, score, ones)
# candidate loss
n = score.size(1)
for i in range(1, n):
pos_score = score[:, :-i]
neg_score = score[:, i:]
pos_score = pos_score.contiguous().view(-1)
neg_score = neg_score.contiguous().view(-1)
ones = torch.ones(pos_score.size()).cuda(score.device)
loss_func = torch.nn.MarginRankingLoss(self.margin * i)
TotalLoss += loss_func(pos_score, neg_score, ones)
# gold summary loss
pos_score = summary_score.unsqueeze(-1).expand_as(score)
neg_score = score
pos_score = pos_score.contiguous().view(-1)
neg_score = neg_score.contiguous().view(-1)
ones = torch.ones(pos_score.size()).cuda(score.device)
loss_func = torch.nn.MarginRankingLoss(0.0)
TotalLoss += loss_func(pos_score, neg_score, ones)
return TotalLoss
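# Rough usage sketch (hypothetical tensor shapes; the .cuda() calls above assume a GPU):
#   loss_fn = MarginRankingLoss(margin=0.01)
#   score = torch.rand(8, 20, device='cuda') # candidate scores, sorted best-first
#   summary_score = torch.rand(8, device='cuda') # gold-summary scores
#   loss = loss_fn.get_loss(score, summary_score)
# Candidates i positions apart are pushed at least margin*i apart, and every candidate
# is pushed below the gold-summary score.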
class ValidMetric(MetricBase):
def __init__(self, save_path, data, score=None):
super(ValidMetric, self).__init__()
self._init_param_map(score=score)
self.save_path = save_path
self.data = data
self.top1_correct = 0
self.top6_correct = 0
self.top10_correct = 0
self.rouge = Rouge()
self.ROUGE = 0.0
self.Error = 0
self.cur_idx = 0
# an approximate method of calculating ROUGE
def fast_rouge(self, dec, ref):
if dec == '' or ref == '':
return 0.0
scores = self.rouge.get_scores(dec, ref)
return (scores[0]['rouge-1']['f'] + scores[0]['rouge-2']['f'] + scores[0]['rouge-l']['f']) / 3
def evaluate(self, score):
batch_size = score.size(0)
self.top1_correct += int(torch.sum(torch.max(score, dim=1).indices == 0))
self.top6_correct += int(torch.sum(torch.max(score, dim=1).indices <= 5))
self.top10_correct += int(torch.sum(torch.max(score, dim=1).indices <= 9))
# Fast ROUGE
for i in range(batch_size):
max_idx = int(torch.max(score[i], dim=0).indices)
if max_idx >= len(self.data[self.cur_idx]['indices']):
self.Error += 1 # Check if the candidate summary generated by padding is selected
self.cur_idx += 1
continue
ext_idx = self.data[self.cur_idx]['indices'][max_idx]
ext_idx.sort()
dec = []
ref = ' '.join(self.data[self.cur_idx]['summary'])
for j in ext_idx:
dec.append(self.data[self.cur_idx]['text'][j])
dec = ' '.join(dec)
self.ROUGE += self.fast_rouge(dec, ref)
self.cur_idx += 1
def get_metric(self, reset=True):
top1_accuracy = self.top1_correct / self.cur_idx
top6_accuracy = self.top6_correct / self.cur_idx
top10_accuracy = self.top10_correct / self.cur_idx
ROUGE = self.ROUGE / self.cur_idx
eval_result = {'top1_accuracy': top1_accuracy, 'top6_accuracy': top6_accuracy,
'top10_accuracy': top10_accuracy, 'Error': self.Error, 'ROUGE': ROUGE}
with open(join(self.save_path, 'train_info.txt'), 'a') as f:
print('top1_accuracy = {}, top6_accuracy = {}, top10_accuracy = {}, Error = {}, ROUGE = {}'.format(
top1_accuracy, top6_accuracy, top10_accuracy, self.Error, ROUGE), file=f)
if reset:
self.top1_correct = 0
self.top6_correct = 0
self.top10_correct = 0
self.ROUGE = 0.0
self.Error = 0
self.cur_idx = 0
return eval_result
class MatchRougeMetric(MetricBase):
def __init__(self, data, save_path, n_total, score=None):
super(MatchRougeMetric, self).__init__()
self._init_param_map(score=score)
self.data = data
self.save_path = save_path
self.n_total = n_total
self.cur_idx = 0
self.ext = []
self.start = time()
def evaluate(self, score):
ext = int(torch.max(score, dim=1).indices) # batch_size = 1
self.ext.append(ext)
self.cur_idx += 1
print('{}/{} ({:.2f}%) decoded in {} seconds\r'.format(
self.cur_idx, self.n_total, self.cur_idx/self.n_total*100, timedelta(seconds=int(time()-self.start))
), end='')
def get_metric(self, reset=True):
gold = []
pred = []
print('\nStart writing files !!!')
for i, ext in enumerate(self.ext):
sent_ids = self.data[i]['indices'][ext]
dec, ref = [], []
for j in sent_ids:
dec.append(self.data[i]['text'][j])
for sent in self.data[i]['summary']:
ref.append(sent)
pred.append("\n".join(dec))
gold.append("\n".join(ref))
hypo_path = self.save_path + '/matchsum_hypo.csv'
ref_path = self.save_path + '/matchsum_ref.csv'
hypo_df = pd.DataFrame(pred, columns=["hypothesis"])
hypo_df.to_csv(hypo_path, index=False)
ref_df = | pd.DataFrame(gold, columns=["references"]) | pandas.DataFrame |
import time
import numpy as np
import pandas as pd
from ai4good.models.cm.simulator import AGE_SEP
DIGIT_SEP = ' to ' # separator between the IQR bounds, written out to avoid confusion with a minus sign
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print('%s function took %0.1f s' % (f.__name__, (time2-time1)))
return ret
return wrap
def load_report(mr, params) -> pd.DataFrame:
return normalize_report(mr.get('report'), params)
def normalize_report(df, params):
df = df.copy()
if 'R0' in df.columns:
df.R0 = df.R0.apply(lambda x: round(complex(x).real, 1))
df_temp = df.drop(['Time', 'R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'],
axis=1)
else:
df_temp = df.drop(['Time', 'iteration'], axis=1)
df_temp = df_temp * params.population
df.update(df_temp)
return df
@timing
def prevalence_all_table(df):
# calculate Peak Day IQR and Peak Number IQR for each of the 'incident' variables to table
if 'iteration' in df.columns:
df = df.filter(regex='^Time$|^iteration$|^Infected \(symptomatic\)$|^Hospitalised$|^Critical$|^Change in Deaths$')
groupby_columns = ['iteration']
else:
df = df.filter(regex='^Time$|^R0$|^latentRate$|^removalRate$|^hospRate$|^deathRateICU$|^deathRateNoIcu$|^Infected \(symptomatic\)$|^Hospitalised$|^Critical$|^Change in Deaths$')
groupby_columns = ['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu']
grouped = df.groupby(groupby_columns)
indices_to_drop = groupby_columns + ['Time']
peak_days = get_quantile_report(grouped.apply(lambda x: x.set_index('Time').idxmax()), indices_to_drop)
peak_numbers = get_quantile_report(grouped.max(), indices_to_drop)
resultdf = pd.DataFrame.from_dict({'Peak Day IQR': peak_days, 'Peak Number IQR': peak_numbers})
resultdf.index.name = 'Outcome'
table_columns = {'Infected (symptomatic)': 'Prevalence of Symptomatic Cases',
'Hospitalised': 'Hospitalisation Demand',
'Critical': 'Critical Care Demand', 'Change in Deaths': 'Prevalence of Deaths'}
return resultdf.reindex(index=table_columns.keys()).rename(index=table_columns).reset_index()
def get_quantile_report(x, indices_to_drop):
return x.stack().groupby(level=-1).quantile([.25, .75])\
.apply(round).astype(int).astype(str).groupby(level=0).apply(lambda x: DIGIT_SEP.join(x.values)).drop(index = indices_to_drop, errors='ignore')
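# Worked example of the helper above (illustrative numbers): if the stacked peak days
# for 'Hospitalised' have quantiles 12.3 (25%) and 17.8 (75%), the reported entry
# becomes '12 to 18' (both quantiles rounded, then joined with DIGIT_SEP).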
@timing
def prevalence_age_table(df):
# calculate age-specific Peak Day IQR and Peak Number IQR for each of the 'prevalent' variables to construct the table
if 'iteration' in df.columns:
df = df.filter(regex='^Time$|^iteration$|^Infected \(symptomatic\)|^Hospitalised|^Critical')
groupby_columns = ['iteration']
else:
df = df.filter(regex='^Time$|^R0$|^latentRate$|^removalRate$|^hospRate$|^deathRateICU$|^deathRateNoIcu$|^Infected \(symptomatic\)|^Hospitalised|^Critical')
groupby_columns = ['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu']
grouped = df.groupby(groupby_columns)
indices_to_drop = groupby_columns + ['Time']
peak_days = get_quantile_report(grouped.apply(lambda x: x.set_index('Time').idxmax()), indices_to_drop)
peak_numbers = get_quantile_report(grouped.max(), indices_to_drop)
resultdf = pd.DataFrame.from_dict({'Peak Day, IQR': peak_days, 'Peak Number, IQR': peak_numbers})
arrays = [np.array(['Incident Cases']*9 + ['Hospital Demand']*9 + ['Critical Demand']*9),
np.array(
['all ages', '<9 years', '10-19 years', '20-29 years', '30-39 years', '40-49 years', '50-59 years',
'60-69 years', '70+ years']*3)]
sorted_index = resultdf.sort_index().index.values
my_comp_order = ['Infected (symptomatic)', 'Hospitalised', 'Critical']
my_sorted_index = sum([list(filter(lambda column: comp in column, sorted_index)) for comp in my_comp_order], [])
sortedresultdf = resultdf.reindex(index=my_sorted_index)
sortedresultdf.index = pd.MultiIndex.from_arrays(arrays)
return sortedresultdf
@timing
def cumulative_all_table(df, population, camp_params):
# now we try to calculate the total count
# cases: (N-exposed)*0.5 since the asymptomatic rate is 0.5
# hopistal days: cumulative count of hospitalisation bucket
# critical days: cumulative count of critical days
# deaths: we already have that from the frame
if 'iteration' in df.columns:
df = df.filter(regex='^Time$|^iteration$|Susceptible' + AGE_SEP + '|^Deaths$|^Hospitalised$|^Critical$|^Deaths$')
groupby_columns = ['iteration']
else:
df = df.filter(regex='^Time$|^R0$|^latentRate$|^removalRate$|^hospRate$|^deathRateICU$|^deathRateNoIcu$|Susceptible' + AGE_SEP + '|^Deaths$|^Hospitalised$|^Critical$|^Deaths$')
groupby_columns = ['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu']
groups = df.groupby(groupby_columns)
groups_tails = groups.apply(lambda x: x.set_index('Time').tail(1))
susceptible = groups_tails.filter(like='Susceptible'+AGE_SEP).rename(columns=lambda x: x.split(AGE_SEP)[1])[camp_params['Age']]
susceptible = ((population * camp_params['Population_structure'].values / 100 - susceptible) * camp_params['p_symptomatic'].values).sum(axis=1)
susceptible.index = susceptible.index.droplevel('Time')
deaths = groups_tails['Deaths']
deaths.index = deaths.index.droplevel('Time')
cumulative = {
'Susceptible': susceptible,
'Hospitalised': groups['Hospitalised'].sum(),
'Critical': groups['Critical'].sum(),
'Deaths': deaths
}
cumulative_all = pd.DataFrame(cumulative)
cumulative_count = cumulative_all.quantile([.25, .75]).apply(round).astype(int).astype(str).apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
data = {'Totals': ['Symptomatic Cases', 'Hospital Person-Days', 'Critical Person-days', 'Deaths'],
'Counts': cumulative_count}
return pd.DataFrame.from_dict(data)
@timing
def cumulative_age_table(df, camp_params):
# need to have an age break down for this as well
# 1 month 3 month and 6 month breakdown
arrays = [np.array(
['Symptomatic Cases'] * 9 + ['Hospital Person-Days'] * 9 + ['Critical Person-days'] * 9 + ['Deaths'] * 9),
np.array(
['all ages', '<9 years', '10-19 years', '20-29 years', '30-39 years', '40-49 years', '50-59 years',
'60-69 years', '70+ years'] * 4)]
params_select = ['Susceptible:', 'Deaths']
params_accu = ['Hospitalised', 'Critical']
columns_to_acc, columns_to_select, multipliers = collect_columns(df.columns, params_accu, params_select, camp_params)
if 'iteration' in df.columns:
groupby_columns = ['iteration']
else:
groupby_columns = ['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu']
first_month_diff = df.groupby(groupby_columns)[
columns_to_select + ['Time']].apply(find_first_month_diff)
third_month_diff = df.groupby(groupby_columns)[
columns_to_select + ['Time']].apply(find_third_month_diff)
sixth_month_diff = df.groupby(groupby_columns)[
columns_to_select + ['Time']].apply(find_sixth_month_diff)
first_month_select = first_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
three_month_select = third_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
six_month_select = sixth_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
first_month_select['Susceptible'] = first_month_select.filter(like='Susceptible:').sum(axis=1)
three_month_select['Susceptible'] = three_month_select.filter(like='Susceptible:').sum(axis=1)
six_month_select['Susceptible'] = six_month_select.filter(like='Susceptible:').sum(axis=1)
one_month_cumsum = df.groupby(groupby_columns)[
columns_to_acc + ['Time']].apply(find_one_month)
three_month_cumsum = df.groupby(groupby_columns)[
columns_to_acc + ['Time']].apply(find_three_months)
six_month_cumsum = df.groupby(groupby_columns)[
columns_to_acc + ['Time']].apply(find_six_months)
first_month_accu = one_month_cumsum[columns_to_acc].quantile([.25, .75])
three_month_accu = three_month_cumsum[columns_to_acc].quantile([.25, .75])
six_month_accu = six_month_cumsum[columns_to_acc].quantile([.25, .75])
first_month = pd.concat([first_month_select, first_month_accu], axis=1)
third_month = pd.concat([three_month_select, three_month_accu], axis=1)
sixth_month = pd.concat([six_month_select, six_month_accu], axis=1)
sorted_columns = first_month.columns.sort_values()
my_comp_order = ['Susceptible', 'Hospitalised', 'Critical', 'Deaths']
my_sorted_columns = sum([list(filter(lambda column: comp in column, sorted_columns)) for comp in my_comp_order], [])
first_month_count = first_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
three_month_count = third_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
six_month_count = sixth_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
d = {'First month': first_month_count, 'First three months': three_month_count,
'First six months': six_month_count}
return pd.DataFrame(data=d, index=arrays)
def collect_columns(columns, params_accu, params_select, camp_params):
columns_to_select = list(filter(lambda column: any(column.startswith(s) for s in params_select), columns))
columns_to_acc = list(filter(lambda column: any(column.startswith(s) for s in params_accu), columns))
multipliers = list(
map(lambda column: -camp_params[camp_params['Age'].apply(lambda x: x in column)]['p_symptomatic'].values[0] if 'Susceptible:' in column else 1,
columns_to_select))
return columns_to_acc, columns_to_select, multipliers
def diff_table(baseline, intervention, N):
t1 = effectiveness_cum_table(baseline, intervention, N)
t2 = effectiveness_peak_table(baseline, intervention)
r1 = [
'Symptomatic Cases',
t1.loc['Symptomatic Cases']['Reduction'],
t2.loc['Prevalence of Symptomatic Cases']['Delay in Peak Day'],
t2.loc['Prevalence of Symptomatic Cases']['Reduction in Peak Number']
]
r2 = [
'Hospital Person-Days',
t1.loc['Hospital Person-Days']['Reduction'],
t2.loc['Hospitalisation Demand']['Delay in Peak Day'],
t2.loc['Hospitalisation Demand']['Reduction in Peak Number']
]
r3 = [
'Critical Person-days',
t1.loc['Critical Person-days']['Reduction'],
t2.loc['Critical Care Demand']['Delay in Peak Day'],
t2.loc['Critical Care Demand']['Reduction in Peak Number']
]
r4 = [
'Deaths',
t1.loc['Deaths']['Reduction'],
t2.loc['Prevalence of Deaths']['Delay in Peak Day'],
t2.loc['Prevalence of Deaths']['Reduction in Peak Number']
]
df = pd.DataFrame([r1, r2, r3, r4],
columns=['Outcome', 'Overall reduction', 'Delay in Peak Day', 'Reduction in Peak Number'])
return df
def effectiveness_cum_table(baseline, intervention, N):
table_params = ['Symptomatic Cases', 'Hospital Person-Days', 'Critical Person-days', 'Deaths']
cum_table_baseline = cumulative_all_table(baseline, N)
# print("CUM: "+str(cum_table_baseline.loc[:, 'Counts']))
baseline_numbers = cum_table_baseline.loc[:, 'Counts'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)])
baseline_numbers_separate = pd.DataFrame(baseline_numbers.tolist(), columns=['25%', '75%'])
comparisonTable = {}
cumTable = cumulative_all_table(intervention, N)
# print("Counts: \n"+str(cumTable.loc[:, 'Counts']))
intervention_numbers = pd.DataFrame(
cumTable.loc[:, 'Counts'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
differencePercentage = (baseline_numbers_separate - intervention_numbers) / baseline_numbers_separate * 100
prettyOutput = []
for _, row in differencePercentage.round(0).astype(int).iterrows():
output1, output2 = row['25%'], row['75%']
prettyOutput.append(format_diff_row(output1, output2))
comparisonTable['Reduction'] = prettyOutput
comparisonTable['Total'] = table_params
return | pd.DataFrame.from_dict(comparisonTable) | pandas.DataFrame.from_dict |
import os
from tqdm import tqdm
from datetime import datetime
import pandas as pd
import numpy as np
from scipy.io import wavfile
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten
from tensorflow.keras.layers import LSTM, TimeDistributed
from tensorflow.keras.layers import Dropout, SpatialDropout2D, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import pickle
from python_speech_features import mfcc
import librosa
from librosa.feature import melspectrogram
from cfg import Config
from buildfeats import build_rand_feat
def get_conv_model():
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu',
strides=(1,1), padding='same', input_shape=input_shape))
model.add(Conv2D(16, (3, 3), activation='relu', strides=(1,1),padding='same'))
model.add(Conv2D(32, (3, 3), activation='relu', strides=(1,1),padding='same'))
# model.add(Conv2D(128, (3, 3), activation='relu', strides=(1,1), padding='same'))
model.add(MaxPool2D((2,2)))
model.add(Dropout(0.5))
model.add(Flatten())
# model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def get_recurrent_model():
#shape of data for RNN is (n, time, feat)
model = Sequential()
model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(64, activation='relu')))
model.add(TimeDistributed(Dense(32, activation='relu')))
model.add(TimeDistributed(Dense(16, activation='relu')))
model.add(TimeDistributed(Dense(8, activation='relu')))
model.add(Flatten())
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
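# Note: both builders read a module-level `input_shape`, which must exist before they
# are called. A sketch, assuming build_rand_feat returns (X, y) with X already shaped
# for the chosen mode:
#   X, y = build_rand_feat()
#   input_shape = X.shape[1:]
#   model = get_conv_model() if config.mode == 'conv' else get_recurrent_model()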
config = Config(mode='conv')
cur_df = pd.read_csv('data/train/roadsound_labels.csv', index_col=0)
noisy_df = pd.read_csv('data/train_noisy/roadsound_labels.csv', index_col=0)
df = | pd.concat([cur_df, noisy_df], sort=True) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import Imputer
from sklearn import linear_model
from sklearn.ensemble import BaggingRegressor, RandomForestRegressor
from sklearn import svm
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import tflearn
import tensorflow as tf
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
def lets_try(train, labels):
results = {}
def test_model(clf):
cv = KFold(n_splits=5, shuffle=True, random_state=45)
r2 = make_scorer(r2_score)
r2_val_score = cross_val_score(clf, train, labels, cv=cv, scoring=r2)
scores = [r2_val_score.mean()]
return scores
clf = linear_model.LinearRegression()
results["Linear"] = test_model(clf)
clf = linear_model.Ridge()
results["Ridge"] = test_model(clf)
clf = linear_model.BayesianRidge()
results["Bayesian Ridge"] = test_model(clf)
clf = linear_model.HuberRegressor()
results["Hubber"] = test_model(clf)
clf = linear_model.Lasso(alpha=1e-4)
results["Lasso"] = test_model(clf)
clf = BaggingRegressor()
results["Bagging"] = test_model(clf)
clf = RandomForestRegressor()
results["RandomForest"] = test_model(clf)
clf = AdaBoostRegressor()
results["AdaBoost"] = test_model(clf)
clf = svm.SVR()
results["SVM RBF"] = test_model(clf)
clf = svm.SVR(kernel="linear")
results["SVM Linear"] = test_model(clf)
results = | pd.DataFrame.from_dict(results, orient='index') | pandas.DataFrame.from_dict |
###########################
#
# SizeTools
#
# Tools to work with data of different sizes
#
import pandas as pd
import numpy as np
import re
##################
#
# Stats and Computations
#
#
def subsample_time(myPSD,start_time,end_time):
good_samples = (myPSD.sampleTimes > start_time) & (myPSD.sampleTimes < end_time)
shorteneddata_PSD = myPSD.dNdlogDp[good_samples,:]
return shorteneddata_PSD
def compute_stats_sampletime(myPSD,start_time,end_time):
shorteneddata = subsample_time(myPSD,start_time,end_time)
sd_mean = np.nanmean(shorteneddata,axis=0)
sd_std = np.nanstd(shorteneddata,axis=0)
return sd_mean,sd_std
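# Usage sketch (hypothetical file name; any object exposing .sampleTimes and .dNdlogDp
# works, e.g. the return value of load_APS or load_3330 below):
#   myPSD = load_APS('aps_export.txt')
#   mean_dist, std_dist = compute_stats_sampletime(
#       myPSD, pd.to_datetime('2019-07-01 10:00'), pd.to_datetime('2019-07-01 11:00'))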
##################
#
# Plotting
#
#
def nicePalette():
nicepalette = ["#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"]
return nicepalette
def plot_format(fs=16,ls=10):
import matplotlib.pyplot as plt
plt.rcdefaults()
fSize = fs
fName = 'Arial'
fWght = 'bold'
defLW = 2
#Format the plots
font = {'family' : 'normal',
'weight' : fWght,
'size' : fSize}
plt.rc('font', **font)
plt.rc('legend',fontsize=10)
plt.rc('axes',linewidth=defLW)
plt.rc('axes',labelsize=fSize)
plt.rc('axes',labelweight=fWght)
#plt.rc('axes',edgecolor=[0.1,0.1,0.1])#,color='black')
plt.rc('lines',linewidth = defLW)
def plot_3dTS(myPSD,savename):
import matplotlib.pyplot as plt
plt.figure()
#plt.pcolormesh(myPSD.Time_HOD,myPSD.binEdges,myPSD.data.transpose(),vmin=0,vmax=5)
plt.pcolormesh(myPSD.Time_HOD,myPSD.binEdges,myPSD.dNdlogDp.transpose(),vmin=0,vmax=50)
plt.colorbar()
plt.yticks(myPSD.binCenters)
plt.ylim([0.25,12])
plt.xlim([np.nanmin(myPSD.Time_HOD),np.nanmax(myPSD.Time_HOD)])
plt.xlabel('Hour of Day (EST)')
plt.ylabel('Size (um)')
plt.title('Concentration (Dn/DlogDp), #/cm^-3')
plt.savefig(savename)
##################
#
# Loading
#
#
def load_APS(file_load,delim=','):
myTSI = TSI()
df = pd.read_table(file_load,header=6,parse_dates=[[1,2]],index_col=0,delimiter=delim)
#Handle Bin Edges
bin_edges_aps = np.array([0.4870,0.5228,0.5624,0.6039,0.6492,0.6975,0.7495,0.8055,0.8663,0.9309,1.0004,1.0746,1.1547,1.2406,1.3332,1.4335,1.5396,1.6544,1.7779,1.9110,2.0538,2.2071,2.3711,2.5486,2.7387,2.9432,3.1622,3.3985,3.6522,3.9242,4.2165,4.5320,4.8696,5.2333,5.6230,6.0426,6.4941,6.9784,7.4993,8.0588,8.6598,9.3061,10.0035,10.7463,11.5470,12.4055,13.3316,14.3349,15.3960,16.5439,17.7787,19.1099,20.5353])
dLogDp = np.diff(np.log10(bin_edges_aps))
myTSI.binEdges = bin_edges_aps
logbe = np.log10(myTSI.binEdges)
bdiff = np.divide(np.diff(logbe),2)
logbc = logbe[0:-1] + bdiff
myTSI.binCenters = np.power(10,logbc)
#Handle Times
myTSI.sampleTimes = pd.to_datetime(df.index)
myTSI.Time_HOD = np.array([(time.hour + np.true_divide(time.minute,60) + np.true_divide(time.second,3600)) for time in myTSI.sampleTimes])
#Check type and compute data values
if df['Aerodynamic Diameter'][1] == 'dN':
myTSI.data = df.iloc[:,2:54].to_numpy()
myTSI.dNdlogDp = np.divide(myTSI.data,dLogDp)
return myTSI
def load_3330(input_file):
myTSI = TSI()
with open(input_file,'r') as f:
myTSI.instrName = f.readline().strip().split(',')[1]
myTSI.modelNum = int(f.readline().strip().split(',')[1])
myTSI.serialNum = int(f.readline().strip().split(',')[1])
myTSI.firmware = f.readline().strip().split(',')[1]
myTSI.calDate = f.readline().strip().split(',')[1]
myTSI.protocolName = f.readline().strip().split(',')[1]
myTSI.testStartTime = f.readline().strip().split(',')[1]
myTSI.testStartDate = f.readline().strip().split(',')[1]
myTSI.testLength = f.readline().strip().split(',')[1]
myTSI.sampleInterval= f.readline().strip().split(',')[1]
myTSI.numChannels= int(f.readline().strip().split(',')[1])+1
myTSI.ChannelNum = np.arange(0,myTSI.numChannels,1)
myTSI.cutPoint = np.zeros(myTSI.numChannels)
for channel in myTSI.ChannelNum:
myTSI.cutPoint[channel] = float(f.readline().strip().split(',')[1])
myTSI.alarm = f.readline().strip().split(',')[1]
myTSI.Density = float(f.readline().strip().split(',')[1])
myTSI.refractiveIndex = f.readline().strip().split(',')[1]
myTSI.sizeCorrFac = float(f.readline().strip().split(',')[1])
myTSI.flowCal = float(f.readline().strip().split(',')[1])
myTSI.deadTimeCorrFac = float(f.readline().strip().split(',')[1])
myTSI.errors = f.readline().strip().split(',')[1]
myTSI.numSamples = int(f.readline().strip().split(',')[1])
test = f.readline().strip()
if test != ',':
print("Error on the read")
print(test)
myTSI.columnNames = f.readline().strip().split(',')
#PreAllocateNumpyArrays:
myTSI.elapsedTime = np.zeros(myTSI.numSamples)
myTSI.rawdata = np.zeros((myTSI.numSamples,myTSI.numChannels))
myTSI.deadTime = np.zeros(myTSI.numSamples)
myTSI.T = np.zeros(myTSI.numSamples)
myTSI.RH = np.zeros(myTSI.numSamples)
myTSI.P = np.zeros(myTSI.numSamples)
myTSI.alarms = []
myTSI.errors = []
nC = myTSI.numChannels
nS = myTSI.numSamples
myTSI.rawdata[:] = np.NaN
datastr = '%i,' + ' %04d,'*nC + '%i,%04d,%04d,%04d,%s,%s'
for i in range(0,myTSI.numSamples):
data = f.readline().strip().split(',')
myTSI.elapsedTime[i] = int(data[0])
myTSI.rawdata[i,:] = np.asarray(data[1:nC + 1],dtype=np.float32)
myTSI.deadTime[i] = float(data[nC + 1])
myTSI.T[i] = float(data[nC + 2] )
myTSI.RH[i] = float(data[nC + 3])
myTSI.P[i] = float(data[nC + 4])
myTSI.alarms.append(data[nC + 5])
myTSI.errors.append(data[nC + 6])
myTSI.binEdges = myTSI.cutPoint
#Note: We do not do any serious work with the largest bin
#We do not know how big the particles are
##Test analysis:
#this uses the equation in 5.5.
#We currently do not apply the deadtime correction factor.
#Concentration Factor
#This is the concentration!
#Get sample interval in seconds
siv = np.asarray(myTSI.sampleInterval.split(':'),dtype='float')
samp_time = siv[0]*3600+siv[1]*60+siv[2]
samp_time_corr = np.subtract(samp_time,np.multiply(myTSI.deadTimeCorrFac,myTSI.deadTime))
concentration_factor = np.multiply(myTSI.flowCal,samp_time) # assuming flowCal (read from the header) is the calibrated sample flow
myTSI.data = np.divide(myTSI.rawdata,concentration_factor)
sumparts = np.nansum(myTSI.data[:,:],axis=1)
#Now for the good stuff
#Convert sizes to DnDlogDp
#Discard the last size bin because it counts but does not size particles larger than 10 micron
#Not actually bin center
myTSI.binCenters = myTSI.cutPoint[0:-1] + np.divide(np.diff(myTSI.cutPoint),2)
logvs = np.log10(myTSI.cutPoint)
dlogDp = np.diff(logvs)
myTSI.dNdlogDp = np.divide(myTSI.data[:,0:-1],dlogDp)
leftloc = myTSI.cutPoint[0:-1]
width = np.diff(myTSI.cutPoint)
myTSI.startDateTime = pd.to_datetime(myTSI.testStartDate + ' ' + myTSI.testStartTime)
myTSI.sampleTimes = myTSI.startDateTime + pd.to_timedelta(myTSI.elapsedTime,'s')
myTSI.Time_HOD = np.array([(time.hour + np.true_divide(time.minute,60) + np.true_divide(time.second,3600)) for time in myTSI.sampleTimes])
return myTSI
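# --- Illustrative sketch (not original code) --------------------------------
# A minimal, self-contained example of the raw-count -> concentration ->
# dN/dlogDp arithmetic used in the reader above. Every number below (bin
# edges, flow, sample time, counts) is invented purely for illustration.
def _example_dndlogdp_sketch():
    import numpy as np
    cut_points = np.array([0.3, 0.5, 1.0, 2.5, 10.0])         # um, hypothetical bin edges
    raw_counts = np.array([1200.0, 800.0, 300.0, 50.0, 5.0])  # counts per sample
    flow_l_per_s = 16.7 / 60.0                                 # hypothetical flow calibration
    samp_time = 60.0                                           # hypothetical sample interval, s
    concentration = raw_counts / (flow_l_per_s * samp_time)    # number per litre
    dlogdp = np.diff(np.log10(cut_points))
    # drop the last, unbounded bin, exactly as the reader above does
    return concentration[:-1] / dlogdp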
def load_EDM164(input_file):
myGRM = GRIMM()
average=False
nC = 0
nc = 0
current_time = []
grimm_sample_duration = pd.Timedelta('6 seconds')
with open(input_file,'r') as f:
alldata = f.readlines()
for line in alldata:
#for line in f.readlines():
# print line
data = line.strip()
#print data
#Handle scenarios where there's no data
if len(data) == 0:
#no data in line
continue
            if (not current_time) and (data[0] != 'P'):
#we started the file in the middle of a read and don't know the time
continue
if data == 'POWER OFF':
#Power turned off. Could confuse with the 'P' command
continue
if data[0] == 'P':
#It is a new measurement, parse the new measurement
                #Clean: remove the 'P' and '_' characters, then split on whitespace
p_clean = re.sub(r'[P_]','',data).split()
#Date and date string
#print p_clean
datestr = p_clean[1] + '/' + p_clean[2] + '/20' +p_clean[0] + ' ' +p_clean[3] + ':' +p_clean[4]
current_time = | pd.to_datetime(datestr) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: valengo
"""
import sys
from collections import defaultdict
from math import sqrt
from multiprocessing import cpu_count
from typing import Union
import pysam
import numpy as np
from pandas import DataFrame
from amplicnv.tsvparser import CoverageFileParser
from ..bedloader import ROI
from ..commons import ChromDF as cdf
from ..graphics import scatter
from ..mphandler import MPPoolHandler
from ..stats import above_range
from ..stats import below_range
from ..stats import classify_by_count
from ..stats import compute_metric
from ..stats import filter_by_cutoff
from ..stats import iqr
from ..utils import ConfigfileParser
from ..utils import NumberProperty
from ..utils import Region
from ..utils import appenddir
from ..utils import createdir
from ..utils import overrides
from ..utils import validstr
def readcount(region_list: list, filename: str) -> list:
"""
Count the number of reads in regions using pysam.AlignmentFile.count method
:param list region_list: regions in the format: 'chr1:1000-10000'
:param str filename: path to bamfile
:return counters: list of number of reads in regions
"""
try:
with pysam.AlignmentFile(filename, 'rb') as bamfile:
counters = []
for region in region_list:
try:
counters.append(bamfile.count(region=region))
except ValueError:
print("Failed counting the number of reads in {} from {}".format(region, filename))
counters.append(None)
return counters
except OSError:
sys.exit("Failed counting the number of reads in region from {}".format(filename))
def attr_from_header(line):
if line.startswith('#'):
return line.split(':', 1)[1].strip()
else:
print('{0} is not a header line'.format(line))
return None
def metric2label(counters, q1, q3, metric, interval_range):
labels = []
for counter in counters:
if below_range(counter, metric, q1, interval_range):
labels.append('-')
elif above_range(counter, metric, q3, interval_range):
labels.append('+')
else:
labels.append('o')
return labels
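# --- Illustrative sketch (not original code) --------------------------------
# metric2label above depends on below_range/above_range from ..stats, which are
# not shown in this file. This standalone sketch mirrors the same idea with
# plain numpy: label each counter '-', 'o' or '+' depending on whether it falls
# below, inside or above an IQR-based band. The exact formulas in ..stats may
# differ slightly.
def _example_iqr_labels(counters, interval_range=1.5):
    q1, q3 = np.percentile(counters, [25, 75])
    spread = q3 - q1
    low, high = q1 - interval_range * spread, q3 + interval_range * spread
    return ['-' if c < low else '+' if c > high else 'o' for c in counters]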
@validstr('bedfile', empty_allowed=True)
@validstr('bamfile', empty_allowed=True)
@validstr('region', empty_allowed=True)
class NRR(object):
"""
NRR stands for "Number of Reads in Region" loaded from a BAM file
:param str bedfile: path to bedfile where amplicons are listed in
:param str bamfile: path to alignment file (bam format)
:param str region: limit target definition to a given region. It should be in the form: chr1:10000-90000
:param list counters: list of read depth counters
:param ROI bed: amplicons already loaded in memory
:param bool parallel: whether to count target read depth in parallel
:param bool to_label: whether to label targets regarding each target read depth in comparison to the mean
:param str covfile: path to amplicon.cov file
"""
def __init__(self, bedfile: str = None, bamfile: str = None, region: str = None,
counters: list = [], bed: Union[ROI, CoverageFileParser] = None, parallel: bool = True,
to_label: bool = False, covfile: str = None):
self.covfile = covfile
self.bedfile = bedfile
self.bamfile = bamfile
self.region = region
self._counters = self.counters = counters
self._bed = self.bed = bed
self.reads_by_pool = defaultdict(int)
self._nreads = self.nreads = 0
self.normalized_counters = []
self.labels = None
self.labels_by_pool = None
# load or count rd
if self.covfile is not None:
self.bed = CoverageFileParser(self.covfile)
self.counters = self.bed.counters
elif self.load(self.bamfile + '.txt') is None:
self.count(parallel=parallel)
self.save()
if self.counters:
self.reads_by_pool = self.__count_pools()
self.normalized_counters = self.__norm()
if len(self.counters) > 0 and to_label:
print('Labeling targets')
self.labels = self.__label_targets(mode='log')
self.labels_by_pool = self.count_label_by_pool()
@property
def bed(self):
return self._bed
@bed.setter
def bed(self, value):
if (value is None and
self.bedfile is not None and self.covfile is None):
self._bed = ROI(self.bedfile)
else:
self._bed = value
@property
def counters(self):
return self._counters
@counters.setter
def counters(self, value):
self._counters = value
@property
def nreads(self):
return self._nreads
@nreads.setter
def nreads(self, value):
self._nreads = value
def count(self, cores: int = None, parallel: bool = False):
"""
For each defined target, count read depth
:param int cores: number of cores to be used when counting in parallel mode. Default: all available
:param bool parallel: whether to count read depth in parallel
"""
if parallel:
if cores is None:
cores = cpu_count()
self.counters = self.__parallel_count(cores)
else:
self.counters = self.__count()
def load(self, filename: str) -> Union[int, None]:
"""
Load a single NRR from a text file
:param str filename: path to count file. Normally something like: bamfile.bam.txt
:return: 1 when data loading is successful, None otherwise
"""
print('Loading {0} read counters'.format(filename))
try:
with open(filename, 'r') as file:
lines = file.readlines()
except IOError:
            print('There is no counter file for {0}'.format(self.bamfile))
return None
try:
self.bamfile = attr_from_header(lines[0])
self.bedfile = attr_from_header(lines[1])
self.region = attr_from_header(lines[2])
if self.region == 'None':
self.region = None
except IndexError:
print('Not enough information on {} header'.format(filename))
print('Aborting!')
return None
print('Extracting read counters from file')
counters = []
for line in lines:
if line.startswith('chr'):
counters.append(int(line.split('\t')[4].strip()))
self.counters = counters
if self.bed is None:
self.bed = ROI(self.bedfile)
return 1
def save(self, filename: str = None):
"""
Save a single NRR on a text file
:param str filename: path to output
"""
print('Saving {0} read counters'.format(self.bamfile))
if filename is None:
filename = self.bamfile + '.txt'
try:
with open(filename, 'w') as file:
file.write('# BAM source: {0}\n'.format(self.bamfile))
file.write('# bed source: {0}\n'.format(self.bedfile))
file.write('# Regions: {0}\n'.format(self.region))
t = list(self.bed.targets.itertuples())
for i in range(len(t)):
                    file.write('{0}\t{1}\t{2}\t{3}\t{4}\n'.format(
                        t[i][1], t[i][2], t[i][3], t[i][5], self.counters[i]))
print('Done!')
except IOError as error:
print(error)
def __count(self):
try:
bamfile = pysam.AlignmentFile(self.bamfile, 'rb')
except OSError as error:
print(error)
return None
print('Counting number of reads of {}'.format(
self.bamfile))
read_counters = []
for row in self.bed.targets.itertuples():
read_counters.append(bamfile.count(row[1], row[2], row[3]))
return read_counters
def __parallel_count(self, cores: int):
print('Counting number of reads of {}, using {} cores'.format(
self.bamfile, cores))
# get target regions
targets = self.bed.targets
# row[2] + 1 -> faking that we use one based index
region_list = ['{}:{}-{}'.format(row[1], row[2] + 1, row[3])
for row in targets.itertuples()]
# define chunksize and create chunks
chunksize, extra = divmod(len(region_list), cores * 4)
if extra:
chunksize += 1
chunks = []
for i in range(0, len(region_list), chunksize):
chunks.append(region_list[i:i + chunksize])
# define tasks
tasks = [(readcount, chunk, self.bamfile) for chunk in chunks]
with pysam.AlignmentFile(self.bamfile, 'rb'):
counters = MPPoolHandler(tasks, cores).run()
if counters:
counters = [c for counter in counters for c in counter]
return counters
def __count_pools(self):
"""
Count number of reads per pool.
This method is used when loading a NRR from a file
"""
print('Counting reads by pools')
targets = list(self.bed.targets.itertuples())
if len(targets) != len(self.counters):
print(f'Number of targets N={len(targets)} and their read counters N={len(self.counters)} differ.')
print('Aborting!')
return None
reads_by_pool = defaultdict(int)
for i, target in enumerate(targets):
pools = target.pools.unique_flattened()
for pool in pools:
reads_by_pool[pool] += self.counters[i] / len(pools)
self.nreads = sum(self.counters)
return reads_by_pool
def __norm(self, mag: int = 1000000):
print('Normalizing counters')
normalized = []
targets = list(self.bed.targets.itertuples())
if len(targets) != len(self.counters):
print('Number of targets and their read counters differ.')
print('Aborting!')
return None
for i, target in enumerate(targets):
current_pools_counter = []
pools = target.pools.unique_flattened()
for pool in pools:
current_pools_counter.append((self.counters[i] /
self.reads_by_pool[pool]))
normalized.append(mag * (sum(current_pools_counter) /
len(pools)))
return normalized
def __label_targets(self, iqr_range: float = 1.5, std_range: float = 1.5, mode: str = 'normalized') -> list:
"""
Label targets considering IQR on counters (normalized or not - user's choice) for discovering whether a target
is in the lower (-) quartile, upper (+) quartile, or middle (o)
:param float iqr_range: value to multiply IQR by
:param float std_range: value to multiply std by
:param str mode: 'normalized' for employing self.normalized_counters; 'log' for log(self.normalized_counters)
:return: labels (list) a list of size (len.counters) representing the labels for the targets
"""
# compute metric (IQR)
if mode == 'normalized':
counters = self.normalized_counters
elif mode == 'log':
counters = np.log(self.normalized_counters)
else:
counters = self.counters
df = | DataFrame(counters) | pandas.DataFrame |
import json
from numpy.core.numeric import NaN
import pandas as pd
from telegram import ChatAction
import telegram
from telegram.ext.conversationhandler import ConversationHandler
DATA_ANALISIS_ALL_MEMBER = 0
DATA_DESCRIPTION = 0
DATA_ANALISIS_ONE_MEMBER = 0
ENABLE_ANALISIS = 0
def enable_analisis(update, context):
if(update.message.chat["type"] == "group" or update.message.chat["type"] == "supergroup"):
with open('data.json', 'r+') as file:
data = json.load(file)
group_id = str(update.message.chat["id"])
if(data["Grupos"].get(group_id, False) != False):
user_id = str(update.message.from_user.id)
username = str(update.message.from_user.username)
verification = data["Grupos"][group_id]["Usuarios"].get(username, False)
if(verification == -1):
data["Grupos"][group_id]["Usuarios"][username] = user_id
data["Grupos"][group_id]["Administradores"].append(user_id)
if(user_id in data["Grupos"][group_id]["Administradores"]):
data["Analisis_Para_Administradores"][user_id] = group_id
telegram.Bot('2082442589:AAH3MNzWrZVcqXWkHNBvq5Y0edK15AWWvRM').sendMessage(user_id,
"Para pedir el análisis de los miembros del grupo presione \n /mostrar_analisis")
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
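# --- Illustrative note (not original code) ----------------------------------
# The handlers in this file assume a data.json shaped roughly like the sketch
# below. The structure is inferred from the key accesses above and below; the
# real file may contain additional fields.
# {
#   "Grupos": {"<group_id>": {"Usuarios": {"<username>": "<user_id>"},
#                             "Administradores": ["<user_id>", ...],
#                             "Integrantes": {"<user_id>": {"Usuario": "...", "0": 4, ...}},
#                             "Analisis": 0}},
#   "Analisis_Para_Administradores": {"<user_id>": "<group_id>"},
#   "Preguntas": [{"Pregunta": "...", "NumeroPregunta": 1, "IndiceSubcategoria": 1,
#                  "Subcategoria": "...", "Categoria": "..."}],
#   "Resultados": {"<group_id>": {"descripcion": "...", "resultado": "...", "moda": {...}}}
# }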
def analisis(update, context):
if(update.message.chat["type"] == "private"):
with open('data.json', 'r+') as file:
data = json.load(file)
user_id = str(update.message.from_user.id)
if(user_id in data["Analisis_Para_Administradores"].keys()):
group_id = data["Analisis_Para_Administradores"][user_id]
if(data["Grupos"].get(group_id, False) != False and
user_id in data["Grupos"][group_id]["Administradores"]):
group_analisis = data["Grupos"][group_id]["Integrantes"]
keys = group_analisis.keys()
questions = data["Preguntas"]
sub = {"Pregunta":[], "NumeroPregunta":[], "IndiceSubcategoria":[], "Subcategoria":[], "Categoria":[]}
for question in questions:
for column in question:
sub[column].append(question[column])
df = {"Nombre": []}
for q in sub["NumeroPregunta"]:
df[q] = []
for id in keys:
df["Nombre"].append(group_analisis[id]["Usuario"])
for i in range(60):
str_i = str(i)
if str_i in group_analisis[id].keys():
df[i+1].append(group_analisis[id][str_i])
else:
df[i+1].append(NaN)
df = pd.DataFrame(df)
sub = pd.DataFrame(sub)
preproc = pd.DataFrame()
preproc["Nombre"] = df["Nombre"]
categoria = sub.groupby(['Categoria'])
for i in categoria:
preproc[i[0]] = df.loc[:,list(categoria.get_group(i[0])["NumeroPregunta"])].sum(axis=1)
preproc["Porcentaje " + i[0]] =(df.loc[:,list(categoria.get_group(i[0])["NumeroPregunta"])].sum(axis=1))/80*100
preproc["Media " + i[0]] = df.loc[:,list(categoria.get_group(i[0])["NumeroPregunta"])].mean(axis=1)
subcategoria = sub.groupby(['Subcategoria'])
for i in subcategoria:
preproc[i[0]] = df.loc[:,list(subcategoria.get_group(i[0])["NumeroPregunta"])].mean(axis=1)
result = preproc.loc[:,"Alegria":].sum()/df["Nombre"].count()
moda = {
"Alegria" : {
"valor": "-",
"porcentaje": "-"
},
"Seriedad" : {
"valor": "-",
"porcentaje": "-"
},
"Mal humor": {
"valor": "-",
"porcentaje": "-"
}
}
if not preproc["Media Alegria"].empty and not pd.isna(preproc["Media Alegria"].round().mode(dropna=False)[0]):
moda["Alegria"]["valor"] = preproc["Media Alegria"].round().mode(dropna=False)[0]
moda["Alegria"]["porcentaje"] = preproc["Media Alegria"].round().value_counts(normalize=True).max()*100
if not preproc["Media Seriedad"].empty and not pd.isna(preproc["Media Seriedad"].round().mode(dropna=False)[0]):
moda["Seriedad"]["valor"] = preproc["Media Seriedad"].round().mode(dropna=False)[0]
moda["Seriedad"]["porcentaje"] = preproc["Media Seriedad"].round().value_counts(normalize=True).max()*100
if not preproc["Media Mal humor"].empty and not pd.isna(preproc["Media Mal humor"].round().mode(dropna=False)[0]):
moda["Mal humor"]["valor"] = preproc["Media Mal humor"].round().mode(dropna=False)[0]
moda["Mal humor"]["porcentaje"] = preproc["Media Mal humor"].round().value_counts(normalize=True).max()*100
descr = {
"descripcion": preproc.to_json(),
"resultado": result.to_json(),
"moda": moda,
}
data["Resultados"][group_id] = descr
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
def Show_Data_Analisis_All_Member(update, context):
if(update.callback_query.message.chat["type"] == "private"):
with open('data.json', 'r+') as file:
data = json.load(file)
user_id = str(update.callback_query.from_user.id)
if(user_id in data["Analisis_Para_Administradores"].keys()):
group_id = data["Analisis_Para_Administradores"][user_id]
if(data["Grupos"].get(group_id, False) != False and
user_id in data["Grupos"][group_id]["Administradores"]):
general = pd.DataFrame(json.loads(data["Resultados"][group_id]["descripcion"]))
general = general.loc[:, "Nombre": "Media Seriedad"]
general.to_csv("CSV/Resultados en el grupo.csv")
update.callback_query.message.chat.send_action(
action = ChatAction.UPLOAD_DOCUMENT,
timeout = None
)
update.callback_query.message.chat.send_document(
document = open("CSV/Resultados en el grupo.csv", 'rb')
)
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
def Show_Data_Analisis_One_Member(update, context):
if(update.callback_query.message.chat["type"] == "private"):
with open('data.json', 'r+') as file:
data = json.load(file)
user_id = str(update.callback_query.from_user.id)
if(user_id in data["Analisis_Para_Administradores"].keys()):
group_id = data["Analisis_Para_Administradores"][user_id]
if(data["Grupos"].get(group_id, False) != False and
user_id in data["Grupos"][group_id]["Administradores"]):
data["Grupos"][group_id]["Analisis"] = user_id
update.callback_query.message.reply_text("Escriba el username de la persona de la que desea ver sus datos (con / antes del nombre y sin @)")
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
return DATA_ANALISIS_ONE_MEMBER
else:
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
def Show_Data_Analisis_One_Member_Enter(update, context):
if(update.message.chat["type"] == "private"):
with open('data.json', 'r+') as file:
data = json.load(file)
user_id = str(update.message.from_user.id)
if(user_id in data["Analisis_Para_Administradores"].keys()):
group_id = data["Analisis_Para_Administradores"][user_id]
if(data["Grupos"].get(group_id, False) != False and
user_id in data["Grupos"][group_id]["Administradores"] and
data["Grupos"][group_id]["Analisis"] == user_id):
data["Grupos"][group_id]["Analisis"] = 0
name = update.message.text
if(name[0] != "/"):
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
return ConversationHandler.END
name = name[1:len(name)]
preproc = pd.DataFrame(json.loads(data["Resultados"][group_id]["descripcion"]))
personal = preproc[(preproc["Nombre"] == name)]
personal.to_csv("CSV/Resultados en la persona" + ".csv", sep=',')
update.message.reply_text("Resultados de la encuesta de: \n @" + name)
update.message.chat.send_action(
action = ChatAction.UPLOAD_DOCUMENT,
timeout = None
)
update.message.chat.send_document(
document = open("CSV/Resultados en la persona" + ".csv", 'rb')
)
file.seek(0)
json.dump(data, file, indent=4)
file.truncate()
return ConversationHandler.END
def Data_Description(update, context):
if(update.callback_query.message.chat["type"] == "private"):
with open('data.json', 'r+') as file:
data = json.load(file)
user_id = str(update.callback_query.from_user.id)
if(user_id in data["Analisis_Para_Administradores"].keys()):
group_id = data["Analisis_Para_Administradores"][user_id]
if(data["Grupos"].get(group_id, False) != False and
user_id in data["Grupos"][group_id]["Administradores"]):
result = json.loads(data["Resultados"][group_id]["resultado"])
moda = data["Resultados"][group_id]["moda"]
message_text = ""
message_text = "Medias: \n"
for d in result:
if | pd.isna(result[d]) | pandas.isna |
# Color Corrections Functions
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import plot_image
from plantcv.plantcv.roi import circle
from plantcv.plantcv import print_image
from plantcv.plantcv import fatal_error
def get_color_matrix(rgb_img, mask):
""" Calculate the average value of pixels in each color chip for each color channel.
Inputs:
rgb_img = RGB image with color chips visualized
mask = a gray-scale img with unique values for each segmented space, representing unique, discrete
color chips.
Outputs:
color_matrix = a 22x4 matrix containing the average red value, average green value, and average blue value
for each color chip.
headers = a list of 4 headers corresponding to the 4 columns of color_matrix respectively
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:return headers: string array
:return color_matrix: numpy.ndarray
"""
# Check for RGB input
if len(np.shape(rgb_img)) != 3:
fatal_error("Input rgb_img is not an RGB image.")
# Check mask for gray-scale
if len(np.shape(mask)) != 2:
fatal_error("Input mask is not an gray-scale image.")
# create empty color_matrix
color_matrix = np.zeros((len(np.unique(mask))-1, 4))
# create headers
headers = ["chip_number", "r_avg", "g_avg", "b_avg"]
# declare row_counter variable and initialize to 0
row_counter = 0
# for each unique color chip calculate each average RGB value
for i in np.unique(mask):
if i != 0:
chip = rgb_img[np.where(mask == i)]
color_matrix[row_counter][0] = i
color_matrix[row_counter][1] = np.mean(chip[:, 2])
color_matrix[row_counter][2] = np.mean(chip[:, 1])
color_matrix[row_counter][3] = np.mean(chip[:, 0])
row_counter += 1
return headers, color_matrix
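# --- Illustrative sketch (not original code) --------------------------------
# Minimal usage sketch for get_color_matrix on a tiny synthetic BGR image and
# label mask. The two 2x2 "chips" and their colors are invented.
def _example_get_color_matrix_sketch():
    rgb = np.zeros((4, 4, 3), dtype=np.uint8)
    rgb[0:2, 0:2] = (10, 20, 30)      # chip labeled 10 (B, G, R)
    rgb[2:4, 2:4] = (200, 150, 100)   # chip labeled 20 (B, G, R)
    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[0:2, 0:2] = 10
    mask[2:4, 2:4] = 20
    headers, color_matrix = get_color_matrix(rgb, mask)
    # each color_matrix row is [chip_label, r_avg, g_avg, b_avg]
    return headers, color_matrix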
def get_matrix_m(target_matrix, source_matrix):
""" Calculate Moore-Penrose inverse matrix for use in calculating transformation_matrix
Inputs:
target_matrix = a 22x4 matrix containing the average red value, average green value, and average blue value
for each color chip.
source_matrix = a 22x4 matrix containing the average red value, average green value, and average blue value
for each color chip.
Outputs:
matrix_a = a concatenated 22x9 matrix of source_matrix red, green, and blue values to the powers 1, 2, 3
matrix_m = a 9x22 Moore-Penrose inverse matrix
matrix_b = a 22x9 matrix of linear, square, and cubic rgb values from target_img
:param target_matrix: numpy.ndarray
:param source_matrix: numpy.ndarray
:return matrix_a: numpy.ndarray
:return matrix_m: numpy.ndarray
:return matrix_b: numpy.ndarray
"""
# if the number of chips in source_img match the number of chips in target_matrix
if np.shape(target_matrix) == np.shape(source_matrix):
t_cc, t_r, t_g, t_b = np.split(target_matrix, 4, 1)
s_cc, s_r, s_g, s_b = np.split(source_matrix, 4, 1)
else:
combined_matrix = np.zeros((np.ma.size(source_matrix, 0), 7))
row_count = 0
for r in range(0, np.ma.size(target_matrix, 0)):
for i in range(0, np.ma.size(source_matrix, 0)):
if target_matrix[r][0] == source_matrix[i][0]:
combined_matrix[row_count][0] = target_matrix[r][0]
combined_matrix[row_count][1] = target_matrix[r][1]
combined_matrix[row_count][2] = target_matrix[r][2]
combined_matrix[row_count][3] = target_matrix[r][3]
combined_matrix[row_count][4] = source_matrix[i][1]
combined_matrix[row_count][5] = source_matrix[i][2]
combined_matrix[row_count][6] = source_matrix[i][3]
row_count += 1
t_cc, t_r, t_g, t_b, s_r, s_g, s_b = np.split(combined_matrix, 7, 1)
t_r2 = np.square(t_r)
t_r3 = np.power(t_r, 3)
t_g2 = np.square(t_g)
t_g3 = np.power(t_g, 3)
t_b2 = np.square(t_b)
t_b3 = np.power(t_b, 3)
s_r2 = np.square(s_r)
s_r3 = np.power(s_r, 3)
s_g2 = np.square(s_g)
s_g3 = np.power(s_g, 3)
s_b2 = np.square(s_b)
s_b3 = np.power(s_b, 3)
# create matrix_a
matrix_a = np.concatenate((s_r, s_g, s_b, s_b2, s_g2, s_r2, s_b3, s_g3, s_r3), 1)
# create matrix_m
matrix_m = np.linalg.solve(np.matmul(matrix_a.T, matrix_a), matrix_a.T)
# create matrix_b
matrix_b = np.concatenate((t_r, t_r2, t_r3, t_g, t_g2, t_g3, t_b, t_b2, t_b3), 1)
return matrix_a, matrix_m, matrix_b
def calc_transformation_matrix(matrix_m, matrix_b):
""" Calculates transformation matrix (transformation_matrix).
Inputs:
matrix_m = a 9x22 Moore-Penrose inverse matrix
matrix_b = a 22x9 matrix of linear, square, and cubic rgb values from target_img
Outputs:
1-t_det = "deviance" the measure of how greatly the source image deviates from the target image's color space.
Two images of the same color space should have a deviance of ~0.
transformation_matrix = a 9x9 matrix of linear, square, and cubic transformation coefficients
:param matrix_m: numpy.ndarray
:param matrix_b: numpy.ndarray
:return red: numpy.ndarray
:return blue: numpy.ndarray
:return green: numpy.ndarray
:return 1-t_det: float
:return transformation_matrix: numpy.ndarray
"""
# check matrix_m and matrix_b are matrices
if len(np.shape(matrix_b)) != 2 or len(np.shape(matrix_m)) != 2:
fatal_error("matrix_m and matrix_b must be n x m matrices such that m,n != 1.")
# check matrix_b has 9 columns
if np.shape(matrix_b)[1] != 9:
fatal_error("matrix_b must have 9 columns.")
# check matrix_m and matrix_b for multiplication
if np.shape(matrix_m)[0] != np.shape(matrix_b)[1] or np.shape(matrix_m)[1] != np.shape(matrix_b)[0]:
fatal_error("Cannot multiply matrices.")
t_r, t_r2, t_r3, t_g, t_g2, t_g3, t_b, t_b2, t_b3 = np.split(matrix_b, 9, 1)
# multiply each 22x1 matrix from target color space by matrix_m
red = np.matmul(matrix_m, t_r)
green = np.matmul(matrix_m, t_g)
blue = np.matmul(matrix_m, t_b)
red2 = np.matmul(matrix_m, t_r2)
green2 = np.matmul(matrix_m, t_g2)
blue2 = np.matmul(matrix_m, t_b2)
red3 = np.matmul(matrix_m, t_r3)
green3 = np.matmul(matrix_m, t_g3)
blue3 = np.matmul(matrix_m, t_b3)
# concatenate each product column into 9X9 transformation matrix
transformation_matrix = np.concatenate((red, green, blue, red2, green2, blue2, red3, green3, blue3), 1)
# find determinant of transformation matrix
t_det = np.linalg.det(transformation_matrix)
return 1-t_det, transformation_matrix
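# --- Illustrative sketch (not original code) --------------------------------
# How the two functions above chain together on synthetic 22x4 color matrices.
# When target and source share the same color space, the deviance should be
# close to 0. The chip values below are random and purely illustrative.
def _example_transformation_sketch():
    rng = np.random.default_rng(0)
    chips = np.arange(10, 230, 10).reshape(-1, 1)   # 22 chip labels
    rgb_vals = rng.uniform(20, 235, size=(22, 3))
    target = np.hstack([chips, rgb_vals])
    source = np.hstack([chips, rgb_vals])           # identical color space
    _, matrix_m, matrix_b = get_matrix_m(target_matrix=target, source_matrix=source)
    deviance, transformation_matrix = calc_transformation_matrix(matrix_m, matrix_b)
    return deviance, transformation_matrix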
def apply_transformation_matrix(source_img, target_img, transformation_matrix):
""" Apply the transformation matrix to the source_image.
Inputs:
source_img = an RGB image to be corrected to the target color space
target_img = an RGB image with the target color space
transformation_matrix = a 9x9 matrix of tranformation coefficients
Outputs:
corrected_img = an RGB image in correct color space
:param source_img: numpy.ndarray
:param target_img: numpy.ndarray
:param transformation_matrix: numpy.ndarray
:return corrected_img: numpy.ndarray
"""
# check transformation_matrix for 9x9
if np.shape(transformation_matrix) != (9, 9):
fatal_error("transformation_matrix must be a 9x9 matrix of transformation coefficients.")
# Check for RGB input
if len(np.shape(source_img)) != 3:
fatal_error("Source_img is not an RGB image.")
# Autoincrement the device counter
params.device += 1
# split transformation_matrix
red, green, blue, red2, green2, blue2, red3, green3, blue3 = np.split(transformation_matrix, 9, 1)
# find linear, square, and cubic values of source_img color channels
source_b, source_g, source_r = cv2.split(source_img)
source_b2 = np.square(source_b)
source_b3 = np.power(source_b, 3)
source_g2 = np.square(source_g)
source_g3 = np.power(source_g, 3)
source_r2 = np.square(source_r)
source_r3 = np.power(source_r, 3)
# apply linear model to source color channels
b = 0 + source_r * blue[0] + source_g * blue[1] + source_b * blue[2] + source_r2 * blue[3] + source_g2 * blue[
4] + source_b2 * blue[5] + source_r3 * blue[6] + source_g3 * blue[7] + source_b3 * blue[8]
g = 0 + source_r * green[0] + source_g * green[1] + source_b * green[2] + source_r2 * green[3] + source_g2 * green[
4] + source_b2 * green[5] + source_r3 * green[6] + source_g3 * green[7] + source_b3 * green[8]
r = 0 + source_r * red[0] + source_g * red[1] + source_b * red[2] + source_r2 * red[3] + source_g2 * red[
4] + source_b2 * red[5] + source_r3 * red[6] + source_g3 * red[7] + source_b3 * red[8]
# merge corrected color channels onto source_image
bgr = [b, g, r]
corrected_img = cv2.merge(bgr)
# round corrected_img elements to be within range and of the correct data type
corrected_img = np.rint(corrected_img)
corrected_img[np.where(corrected_img > 255)] = 255
corrected_img = corrected_img.astype(np.uint8)
if params.debug == "print":
# If debug is print, save the image to a file
print_image(corrected_img, os.path.join(params.debug_outdir, str(params.device) + "_corrected.png"))
elif params.debug == "plot":
# If debug is plot, print a horizontal view of source_img, corrected_img, and target_img to the plotting device
# plot horizontal comparison of source_img, corrected_img (with rounded elements) and target_img
plot_image(np.hstack([source_img, corrected_img, target_img]))
# return corrected_img
return corrected_img
def save_matrix(matrix, filename):
""" Serializes a matrix as an numpy.ndarray object and save to a .npz file.
Inputs:
matrix = a numpy.matrix
filename = name of file to which matrix will be saved. Must end in .npz
:param matrix: numpy.ndarray
:param filename: string ending in ".npz"
"""
if ".npz" not in filename:
fatal_error("File must be an .npz file.")
# Autoincrement the device counter
params.device += 1
np.savez(filename, matrix)
def load_matrix(filename):
""" Deserializes from file an numpy.ndarray object as a matrix
Inputs:
filename = .npz file to which a numpy.matrix or numpy.ndarray is saved
Outputs:
matrix = a numpy.matrix
:param filename: string ending in ".npz"
:return matrix: numpy.matrix
"""
matrix_file = np.load(filename, encoding="latin1")
matrix = matrix_file['arr_0']
np.asmatrix(matrix)
return matrix
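# --- Illustrative sketch (not original code) --------------------------------
# Round-trip sketch for save_matrix / load_matrix using a temporary directory.
def _example_matrix_roundtrip_sketch():
    import tempfile
    mat = np.arange(81, dtype=float).reshape(9, 9)
    path = os.path.join(tempfile.mkdtemp(), "transformation_matrix.npz")
    save_matrix(mat, path)
    loaded = load_matrix(path)
    assert np.allclose(mat, loaded)
    return loaded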
def correct_color(target_img, target_mask, source_img, source_mask, output_directory):
"""Takes a target_img with preferred color_space and converts source_img to that color_space.
Inputs:
target_img = an RGB image with color chips visualized
source_img = an RGB image with color chips visualized
target_mask = a gray-scale image with color chips and background each represented with unique values
    source_mask     = a gray-scale image with color chips and background each represented as unique values
output_directory = a file path to which outputs will be saved
Outputs:
target_matrix = saved in .npz file, a 22x4 matrix containing the average red value, average green value, and
average blue value for each color chip.
source_matrix = saved in .npz file, a 22x4 matrix containing the average red value, average green value, and
average blue value for each color chip.
transformation_matrix = saved in .npz file, a 9x9 transformation matrix
corrected_img = the source_img converted to the correct color space.
:param target_img: numpy.ndarray
:param source_img: numpy.ndarray
:param target_mask: numpy.ndarray
:param source_mask: numpy.ndarray
:param output_directory: string
:return target_matrix: numpy.matrix
:return source_matrix: numpy.matrix
:return transformation_matrix: numpy.matrix
:return corrected_img: numpy.ndarray
"""
# check output_directory, if it does not exist, create
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# get color matrices for target and source images
target_headers, target_matrix = get_color_matrix(target_img, target_mask)
source_headers, source_matrix = get_color_matrix(source_img, source_mask)
# save target and source matrices
save_matrix(target_matrix, os.path.join(output_directory, "target_matrix.npz"))
save_matrix(source_matrix, os.path.join(output_directory, "source_matrix.npz"))
# get matrix_m
matrix_a, matrix_m, matrix_b = get_matrix_m(target_matrix=target_matrix, source_matrix=source_matrix)
# calculate transformation_matrix and save
deviance, transformation_matrix = calc_transformation_matrix(matrix_m, matrix_b)
save_matrix(transformation_matrix, os.path.join(output_directory, "transformation_matrix.npz"))
# apply transformation
corrected_img = apply_transformation_matrix(source_img, target_img, transformation_matrix)
return target_matrix, source_matrix, transformation_matrix, corrected_img
def create_color_card_mask(rgb_img, radius, start_coord, spacing, nrows, ncols, exclude=[]):
"""Create a labeled mask for color card chips
Inputs:
rgb_img = Input RGB image data containing a color card.
radius = Radius of color masks.
start_coord = Two-element tuple of the first chip mask starting x and y coordinate.
spacing = Two-element tuple of the horizontal and vertical spacing between chip masks.
nrows = Number of chip rows.
ncols = Number of chip columns.
exclude = Optional list of chips to exclude.
Returns:
mask = Labeled mask of chips
:param rgb_img: numpy.ndarray
:param radius: int
:param start_coord: tuple
:param spacing: tuple
:param nrows: int
:param ncols: int
:param exclude: list
:return mask: numpy.ndarray
"""
# Autoincrement the device counter
params.device += 1
# Initialize chip list
chips = []
# Loop over each color card row
for i in range(0, nrows):
# The upper left corner is the y starting coordinate + the chip offset * the vertical spacing between chips
y = start_coord[1] + i * spacing[1]
# Loop over each column
for j in range(0, ncols):
# The upper left corner is the x starting coordinate + the chip offset * the
# horizontal spacing between chips
x = start_coord[0] + j * spacing[0]
# Create a chip ROI
chips.append(circle(img=rgb_img, x=x, y=y, r=radius))
    # Sort excluded chips from largest to smallest, without mutating the
    # caller's (or the default argument's) list in place
    exclude = sorted(exclude, reverse=True)
# Remove any excluded chips
for chip in exclude:
del chips[chip]
# Create mask
mask = np.zeros(shape=np.shape(rgb_img)[:2], dtype=np.uint8())
# Mask label index
i = 1
# Draw labeled chip boxes on the mask
for chip in chips:
mask = cv2.drawContours(mask, chip[0], -1, (i * 10), -1)
i += 1
if params.debug is not None:
# Create a copy of the input image for plotting
canvas = np.copy(rgb_img)
# Draw chip ROIs on the canvas image
for chip in chips:
cv2.drawContours(canvas, chip[0], -1, (255, 255, 0), params.line_thickness)
if params.debug == "print":
print_image(img=canvas, filename=os.path.join(params.debug_outdir,
str(params.device) + "_color_card_mask_rois.png"))
print_image(img=mask, filename=os.path.join(params.debug_outdir,
str(params.device) + "_color_card_mask.png"))
elif params.debug == "plot":
plot_image(canvas)
return mask
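# --- Illustrative sketch (not original code) --------------------------------
# Usage sketch for create_color_card_mask on a blank synthetic image. The
# radius, start coordinate, spacing and grid size are invented; with a real
# color card image they would come from find_color_card or manual inspection.
def _example_color_card_mask_sketch():
    rgb = np.zeros((400, 400, 3), dtype=np.uint8)
    mask = create_color_card_mask(rgb, radius=10, start_coord=(50, 50),
                                  spacing=(60, 60), nrows=4, ncols=6)
    # mask now contains one label (10, 20, ..., 240) per chip
    return mask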
def quick_color_check(target_matrix, source_matrix, num_chips):
""" Quickly plot target matrix values against source matrix values to determine
over saturated color chips or other issues.
Inputs:
source_matrix = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
of the source image
target_matrix = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
of the target image
num_chips = number of color card chips included in the matrices (integer)
:param source_matrix: numpy.ndarray
:param target_matrix: numpy.ndarray
:param num_chips: int
"""
# Imports
from plotnine import ggplot, geom_point, geom_smooth, theme_seaborn, facet_grid, geom_label, scale_x_continuous, \
scale_y_continuous, scale_color_manual, aes
import pandas as pd
# Extract and organize matrix info
tr = target_matrix[:num_chips, 1:2]
tg = target_matrix[:num_chips, 2:3]
tb = target_matrix[:num_chips, 3:4]
sr = source_matrix[:num_chips, 1:2]
sg = source_matrix[:num_chips, 2:3]
sb = source_matrix[:num_chips, 3:4]
# Create columns of color labels
red = []
blue = []
green = []
for i in range(num_chips):
red.append('red')
blue.append('blue')
green.append('green')
# Make a column of chip numbers
chip = np.arange(0, num_chips).reshape((num_chips, 1))
chips = np.row_stack((chip, chip, chip))
# Combine info
color_data_r = np.column_stack((sr, tr, red))
color_data_g = np.column_stack((sg, tg, green))
color_data_b = np.column_stack((sb, tb, blue))
all_color_data = np.row_stack((color_data_b, color_data_g, color_data_r))
# Create a dataframe with headers
dataset = pd.DataFrame({'source': all_color_data[:, 0], 'target': all_color_data[:, 1],
'color': all_color_data[:, 2]})
# Add chip numbers to the dataframe
dataset['chip'] = chips
dataset = dataset.astype({'color': str, 'chip': str, 'target': float, 'source': float})
# Make the plot
p1 = ggplot(dataset, aes(x='target', y='source', color='color', label='chip')) + \
geom_point(show_legend=False, size=2) + \
geom_smooth(method='lm', size=.5, show_legend=False) + \
theme_seaborn() + facet_grid('.~color') + \
geom_label(angle=15, size=7, nudge_y=-.25, nudge_x=.5, show_legend=False) + \
scale_x_continuous(limits=(-5, 270)) + scale_y_continuous(limits=(-5, 275)) + \
scale_color_manual(values=['blue', 'green', 'red'])
# Autoincrement the device counter
params.device += 1
# Reset debug
if params.debug is not None:
if params.debug == 'print':
p1.save(os.path.join(params.debug_outdir, 'color_quick_check.png'), verbose=False)
elif params.debug == 'plot':
print(p1)
def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark',
record_chip_size="median"):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Algorithm written by <NAME>. Updated and implemented into PlantCV by <NAME>.
Inputs:
rgb_img = Input RGB image data containing a color card.
threshold_type = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
threshvalue = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
"mean", or None
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param rgb_img: numpy.ndarray
:param threshold_type: str
:param threshvalue: int
:param blurry: bool
:param background: str
:param record_chip_size: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
    import skimage.feature
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = rgb_img.shape
total_pix = float(height * width)
# Minimum and maximum square size based upon 12 MP image
min_area = 1000. / 12000000. * total_pix
max_area = 8000000. / 12000000. * total_pix
# Create gray image for further processing
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold_type.upper() == "OTSU":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold_type.upper() == "NORMAL":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold_type.upper() == "ADAPTGAUSS":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Input threshold_type=' + str(threshold_type) + ' but should be "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
canny_edges = skimage.feature.canny(threshold)
canny_edges.dtype = 'uint8'
# Compute contours to find the squares of the card
contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
            mc.append((int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
            mx.append(0)
            my.append(0)
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and min_area < marea[index] < max_area:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
# In different versions of OpenCV, width and height can be listed in a different order
# To normalize the ratio we sort them and take the ratio of the longest / shortest
wh_sorted = list(wh)
wh_sorted.sort()
mwhratio.append(wh_sorted[1] / wh_sorted[0])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or len(approx) == 5:
msquarecoords.append(approx)
else: # It's not square
# msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio,
'area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(all_contours)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements
filtered_area = df['area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = | pd.DataFrame(sizecomp) | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/07/26
@author: Wangzili
@group : **
@contact: <EMAIL>
In every indicator below, the parameter df is the k-line data obtained via get_k_data
"""
import pandas as pd
import numpy as np
import itertools
def ma(df, n=10):
"""
移动平均线 Moving Average
MA(N)=(第1日收盘价+第2日收盘价—+……+第N日收盘价)/N
"""
pv = pd.DataFrame()
pv['date'] = df['date']
pv['v'] = df.close.rolling(n).mean()
return pv
def _ma(series, n):
"""
移动平均
"""
return series.rolling(n).mean()
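# --- Illustrative sketch (not original code) --------------------------------
# Usage sketch for ma() on a tiny synthetic k-line DataFrame. Real input would
# come from get_k_data(); the closing prices below are invented.
def _example_ma_sketch():
    df = pd.DataFrame({
        'date': pd.date_range('2018-01-01', periods=8),
        'close': [10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.8, 11.0],
    })
    return ma(df, n=5)   # first 4 rows of 'v' are NaN, then 5-day means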
def md(df, n=10):
"""
移动标准差
STD=S(CLOSE,N)=[∑(CLOSE-MA(CLOSE,N))^2/N]^0.5
"""
_md = pd.DataFrame()
_md['date'] = df.date
_md["md"] = df.close.rolling(n).std(ddof=0)
return _md
def _md(series, n):
"""
标准差MD
"""
    return series.rolling(n).std(ddof=0)  # some implementations use ddof=1
def ema(df, n=12):
"""
指数平均数指标 Exponential Moving Average
今日EMA(N)=2/(N+1)×今日收盘价+(N-1)/(N+1)×昨日EMA(N)
EMA(X,N)=[2×X+(N-1)×EMA(ref(X),N]/(N+1)
"""
_ema = pd.DataFrame()
_ema['date'] = df['date']
_ema['ema'] = df.close.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
return _ema
def _ema(series, n):
"""
指数平均数
"""
return series.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
def macd(df, n=12, m=26, k=9):
"""
平滑异同移动平均线(Moving Average Convergence Divergence)
今日EMA(N)=2/(N+1)×今日收盘价+(N-1)/(N+1)×昨日EMA(N)
DIFF= EMA(N1)- EMA(N2)
DEA(DIF,M)= 2/(M+1)×DIF +[1-2/(M+1)]×DEA(REF(DIF,1),M)
MACD(BAR)=2×(DIF-DEA)
return:
osc: MACD bar / OSC 差值柱形图 DIFF - DEM
diff: 差离值
dea: 讯号线
"""
_macd = pd.DataFrame()
_macd['date'] = df['date']
_macd['diff'] = _ema(df.close, n) - _ema(df.close, m)
_macd['dea'] = _ema(_macd['diff'], k)
_macd['macd'] = _macd['diff'] - _macd['dea']
return _macd
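# --- Illustrative sketch (not original code) --------------------------------
# macd() needs enough history for the 26-period EMA to settle; the random walk
# below is synthetic and only illustrates the call and the returned columns.
def _example_macd_sketch():
    close = 10 + np.cumsum(np.random.default_rng(1).normal(0, 0.1, 120))
    df = pd.DataFrame({'date': pd.date_range('2018-01-01', periods=120),
                       'close': close})
    return macd(df)      # columns: date, diff, dea, macd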
def kdj(df, n=9):
"""
随机指标KDJ
N日RSV=(第N日收盘价-N日内最低价)/(N日内最高价-N日内最低价)×100%
当日K值=2/3前1日K值+1/3×当日RSV=SMA(RSV,M1)
当日D值=2/3前1日D值+1/3×当日K= SMA(K,M2)
当日J值=3 ×当日K值-2×当日D值
"""
_kdj = pd.DataFrame()
_kdj['date'] = df['date']
rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
_kdj['k'] = sma(rsv, 3)
_kdj['d'] = sma(_kdj.k, 3)
_kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
return _kdj
def rsi(df, n=6):
"""
相对强弱指标(Relative Strength Index,简称RSI
LC= REF(CLOSE,1)
RSI=SMA(MAX(CLOSE-LC,0),N,1)/SMA(ABS(CLOSE-LC),N1,1)×100
SMA(C,N,M)=M/N×今日收盘价+(N-M)/N×昨日SMA(N)
"""
# pd.set_option('display.max_rows', 1000)
_rsi = pd.DataFrame()
_rsi['date'] = df['date']
px = df.close - df.close.shift(1)
px[px < 0] = 0
_rsi['rsi'] = sma(px, n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
# def tmax(x):
# if x < 0:
# x = 0
# return x
# _rsi['rsi'] = sma((df['close'] - df['close'].shift(1)).apply(tmax), n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
return _rsi
def vrsi(df, n=6):
"""
量相对强弱指标
VRSI=SMA(最大值(成交量-REF(成交量,1),0),N,1)/SMA(ABS((成交量-REF(成交量,1),N,1)×100%
"""
_vrsi = pd.DataFrame()
_vrsi['date'] = df['date']
px = df['volume'] - df['volume'].shift(1)
px[px < 0] = 0
_vrsi['vrsi'] = sma(px, n) / sma((df['volume'] - df['volume'].shift(1)).abs(), n) * 100
return _vrsi
def boll(df, n=26, k=2):
"""
布林线指标BOLL boll(26,2) MID=MA(N)
标准差MD=根号[∑(CLOSE-MA(CLOSE,N))^2/N]
UPPER=MID+k×MD
LOWER=MID-k×MD
"""
_boll = pd.DataFrame()
_boll['date'] = df.date
_boll['mid'] = _ma(df.close, n)
_mdd = _md(df.close, n)
_boll['up'] = _boll.mid + k * _mdd
_boll['low'] = _boll.mid - k * _mdd
return _boll
def bbiboll(df, n=10, k=3):
"""
BBI多空布林线 bbiboll(10,3)
BBI={MA(3)+ MA(6)+ MA(12)+ MA(24)}/4
标准差MD=根号[∑(BBI-MA(BBI,N))^2/N]
UPR= BBI+k×MD
DWN= BBI-k×MD
"""
# pd.set_option('display.max_rows', 1000)
_bbiboll = pd.DataFrame()
_bbiboll['date'] = df.date
_bbiboll['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
_bbiboll['md'] = _md(_bbiboll.bbi, n)
_bbiboll['upr'] = _bbiboll.bbi + k * _bbiboll.md
_bbiboll['dwn'] = _bbiboll.bbi - k * _bbiboll.md
return _bbiboll
def wr(df, n=14):
"""
威廉指标 w&r
WR=[最高值(最高价,N)-收盘价]/[最高值(最高价,N)-最低值(最低价,N)]×100%
"""
_wr = pd.DataFrame()
_wr['date'] = df['date']
higest = df.high.rolling(n).max()
_wr['wr'] = (higest - df.close) / (higest - df.low.rolling(n).min()) * 100
return _wr
def bias(df, n=12):
"""
乖离率 bias
bias=[(当日收盘价-12日平均价)/12日平均价]×100%
"""
_bias = pd.DataFrame()
_bias['date'] = df.date
_mav = df.close.rolling(n).mean()
_bias['bias'] = (np.true_divide((df.close - _mav), _mav)) * 100
# _bias["bias"] = np.vectorize(lambda x: round(Decimal(x), 4))(BIAS)
return _bias
def asi(df, n=5):
"""
振动升降指标(累计震动升降因子) ASI # 同花顺给出的公式不完整就不贴出来了
"""
_asi = pd.DataFrame()
_asi['date'] = df.date
_m = pd.DataFrame()
_m['a'] = (df.high - df.close.shift()).abs()
_m['b'] = (df.low - df.close.shift()).abs()
_m['c'] = (df.high - df.low.shift()).abs()
_m['d'] = (df.close.shift() - df.open.shift()).abs()
_m['r'] = _m.apply(lambda x: x.a + 0.5 * x.b + 0.25 * x.d if max(x.a, x.b, x.c) == x.a else (
x.b + 0.5 * x.a + 0.25 * x.d if max(x.a, x.b, x.c) == x.b else x.c + 0.25 * x.d
), axis=1)
_m['x'] = df.close - df.close.shift() + 0.5 * (df.close - df.open) + df.close.shift() - df.open.shift()
_m['k'] = np.maximum(_m.a, _m.b)
_asi['si'] = 16 * (_m.x / _m.r) * _m.k
_asi["asi"] = _ma(_asi.si, n)
return _asi
def vr_rate(df, n=26):
"""
成交量变异率 vr or vr_rate
VR=(AVS+1/2CVS)/(BVS+1/2CVS)×100
其中:
AVS:表示N日内股价上涨成交量之和
BVS:表示N日内股价下跌成交量之和
CVS:表示N日内股价不涨不跌成交量之和
"""
_vr = pd.DataFrame()
_vr['date'] = df['date']
_m = pd.DataFrame()
_m['volume'] = df.volume
_m['cs'] = df.close - df.close.shift(1)
_m['avs'] = _m.apply(lambda x: x.volume if x.cs > 0 else 0, axis=1)
_m['bvs'] = _m.apply(lambda x: x.volume if x.cs < 0 else 0, axis=1)
_m['cvs'] = _m.apply(lambda x: x.volume if x.cs == 0 else 0, axis=1)
_vr["vr"] = (_m.avs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()
) / (_m.bvs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()) * 100
return _vr
def vr(df, n=5):
"""
开市后平均每分钟的成交量与过去5个交易日平均每分钟成交量之比
量比:=V/REF(MA(V,5),1);
涨幅:=(C-REF(C,1))/REF(C,1)*100;
1)量比大于1.8,涨幅小于2%,现价涨幅在0—2%之间,在盘中选股的
选股:量比>1.8 AND 涨幅>0 AND 涨幅<2;
"""
_vr = pd.DataFrame()
_vr['date'] = df.date
_vr['vr'] = df.volume / _ma(df.volume, n).shift(1)
_vr['rr'] = (df.close - df.close.shift(1)) / df.close.shift(1) * 100
return _vr
def arbr(df, n=26):
"""
人气意愿指标 arbr(26)
N日AR=N日内(H-O)之和除以N日内(O-L)之和
其中,H为当日最高价,L为当日最低价,O为当日开盘价,N为设定的时间参数,一般原始参数日设定为26日
N日BR=N日内(H-CY)之和除以N日内(CY-L)之和
其中,H为当日最高价,L为当日最低价,CY为前一交易日的收盘价,N为设定的时间参数,一般原始参数日设定为26日。
"""
_arbr = pd.DataFrame()
_arbr['date'] = df.date
_arbr['ar'] = (df.high - df.open).rolling(n).sum() / (df.open - df.low).rolling(n).sum() * 100
_arbr['br'] = (df.high - df.close.shift(1)).rolling(n).sum() / (df.close.shift() - df.low).rolling(n).sum() * 100
return _arbr
def dpo(df, n=20, m=6):
"""
区间震荡线指标 dpo(20,6)
DPO=CLOSE-MA(CLOSE, N/2+1)
MADPO=MA(DPO,M)
"""
_dpo = pd.DataFrame()
_dpo['date'] = df['date']
_dpo['dpo'] = df.close - _ma(df.close, int(n / 2 + 1))
_dpo['dopma'] = _ma(_dpo.dpo, m)
return _dpo
def trix(df, n=12, m=20):
"""
三重指数平滑平均 TRIX(12)
TR= EMA(EMA(EMA(CLOSE,N),N),N),即进行三次平滑处理
TRIX=(TR-昨日TR)/ 昨日TR×100
TRMA=MA(TRIX,M)
"""
_trix = pd.DataFrame()
_trix['date'] = df.date
tr = _ema(_ema(_ema(df.close, n), n), n)
_trix['trix'] = (tr - tr.shift()) / tr.shift() * 100
_trix['trma'] = _ma(_trix.trix, m)
return _trix
def bbi(df):
"""
多空指数 BBI(3,6,12,24)
BBI=(3日均价+6日均价+12日均价+24日均价)/4
"""
_bbi = pd.DataFrame()
_bbi['date'] = df['date']
_bbi['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
return _bbi
def mtm(df, n=6, m=5):
"""
动力指标 MTM(6,5)
MTM(N日)=C-REF(C,N)式中,C=当日的收盘价,REF(C,N)=N日前的收盘价;N日是只计算交易日期,剔除掉节假日。
MTMMA(MTM,N1)= MA(MTM,N1)
N表示间隔天数,N1表示天数
"""
_mtm = pd.DataFrame()
_mtm['date'] = df.date
_mtm['mtm'] = df.close - df.close.shift(n)
_mtm['mtmma'] = _ma(_mtm.mtm, m)
return _mtm
def obv(df):
"""
能量潮 On Balance Volume
多空比率净额= [(收盘价-最低价)-(最高价-收盘价)] ÷( 最高价-最低价)×V # 同花顺貌似用的下面公式
主公式:当日OBV=前一日OBV+今日成交量
1.基期OBV值为0,即该股上市的第一天,OBV值为0
2.若当日收盘价>上日收盘价,则当日OBV=前一日OBV+今日成交量
3.若当日收盘价<上日收盘价,则当日OBV=前一日OBV-今日成交量
4.若当日收盘价=上日收盘价,则当日OBV=前一日OBV
"""
_obv = pd.DataFrame()
_obv["date"] = df['date']
# tmp = np.true_divide(((df.close - df.low) - (df.high - df.close)), (df.high - df.low))
# _obv['obvv'] = tmp * df.volume
# _obv["obv"] = _obv.obvv.expanding(1).sum() / 100
_m = pd.DataFrame()
_m['date'] = df.date
_m['cs'] = df.close - df.close.shift()
_m['v'] = df.volume
_m['vv'] = _m.apply(lambda x: x.v if x.cs > 0 else (-x.v if x.cs < 0 else 0), axis=1)
_obv['obv'] = _m.vv.expanding(1).sum()
return _obv
def cci(df, n=14):
"""
顺势指标
TYP:=(HIGH+LOW+CLOSE)/3
CCI:=(TYP-MA(TYP,N))/(0.015×AVEDEV(TYP,N))
"""
_cci = pd.DataFrame()
_cci["date"] = df['date']
typ = (df.high + df.low + df.close) / 3
_cci['cci'] = ((typ - typ.rolling(n).mean()) /
(0.015 * typ.rolling(min_periods=1, center=False, window=n).apply(
lambda x: np.fabs(x - x.mean()).mean())))
return _cci
def priceosc(df, n=12, m=26):
"""
价格振动指数
PRICEOSC=(MA(C,12)-MA(C,26))/MA(C,12) * 100
"""
_c = pd.DataFrame()
_c['date'] = df['date']
man = _ma(df.close, n)
_c['osc'] = (man - _ma(df.close, m)) / man * 100
return _c
def sma(a, n, m=1):
"""
平滑移动指标 Smooth Moving Average
"""
''' # 方法一,此方法有缺陷
_sma = []
for index, value in enumerate(a):
if index == 0 or pd.isna(value) or np.isnan(value):
tsma = 0
else:
# Y=(M*X+(N-M)*Y')/N
tsma = (m * value + (n - m) * tsma) / n
_sma.append(tsma)
return pd.Series(_sma)
'''
''' # 方法二
results = np.nan_to_num(a).copy()
# FIXME this is very slow
for i in range(1, len(a)):
results[i] = (m * results[i] + (n - m) * results[i - 1]) / n
# results[i] = ((n - 1) * results[i - 1] + results[i]) / n
# return results
'''
# b = np.nan_to_num(a).copy()
# return ((n - m) * a.shift(1) + m * a) / n
a = a.fillna(0)
b = a.ewm(min_periods=0, ignore_na=False, adjust=False, alpha=m/n).mean()
return b
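# --- Illustrative check (not original code) ----------------------------------
# With adjust=False and alpha=m/n, the ewm() call above reproduces the classic
# recursion Y = (M*X + (N-M)*Y') / N seeded with the first observation, which a
# short hand-rolled loop can confirm:
def _example_sma_equivalence_sketch(n=7, m=1):
    s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    manual = [s.iloc[0]]
    for x in s.iloc[1:]:
        manual.append((m * x + (n - m) * manual[-1]) / n)
    return np.allclose(manual, sma(s, n, m))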
def dbcd(df, n=5, m=16, t=76):
"""
异同离差乖离率 dbcd(5,16,76)
BIAS=(C-MA(C,N))/MA(C,N)
DIF=(BIAS-REF(BIAS,M))
DBCD=SMA(DIF,T,1) =(1-1/T)×SMA(REF(DIF,1),T,1)+ 1/T×DIF
MM=MA(DBCD,5)
"""
_dbcd = pd.DataFrame()
_dbcd['date'] = df.date
man = _ma(df.close, n)
_bias = (df.close - man) / man
_dif = _bias - _bias.shift(m)
_dbcd['dbcd'] = sma(_dif, t)
_dbcd['mm'] = _ma(_dbcd.dbcd, n)
return _dbcd
def roc(df, n=12, m=6):
"""
变动速率 roc(12,6)
ROC=(今日收盘价-N日前的收盘价)/ N日前的收盘价×100%
ROCMA=MA(ROC,M)
ROC:(CLOSE-REF(CLOSE,N))/REF(CLOSE,N)×100
ROCMA:MA(ROC,M)
"""
_roc = pd.DataFrame()
_roc['date'] = df['date']
_roc['roc'] = (df.close - df.close.shift(n))/df.close.shift(n) * 100
_roc['rocma'] = _ma(_roc.roc, m)
return _roc
def vroc(df, n=12):
"""
量变动速率
VROC=(当日成交量-N日前的成交量)/ N日前的成交量×100%
"""
_vroc = pd.DataFrame()
_vroc['date'] = df['date']
_vroc['vroc'] = (df.volume - df.volume.shift(n)) / df.volume.shift(n) * 100
return _vroc
def cr(df, n=26):
""" 能量指标
CR=∑(H-PM)/∑(PM-L)×100
PM:上一交易日中价((最高、最低、收盘价的均值)
H:当天最高价
L:当天最低价
"""
_cr = pd.DataFrame()
_cr['date'] = df.date
# pm = ((df['high'] + df['low'] + df['close']) / 3).shift(1)
pm = (df[['high', 'low', 'close']]).mean(axis=1).shift(1)
_cr['cr'] = (df.high - pm).rolling(n).sum()/(pm - df.low).rolling(n).sum() * 100
return _cr
def psy(df, n=12):
"""
心理指标 PSY(12)
PSY=N日内上涨天数/N×100
PSY:COUNT(CLOSE>REF(CLOSE,1),N)/N×100
MAPSY=PSY的M日简单移动平均
"""
_psy = pd.DataFrame()
_psy['date'] = df.date
p = df.close - df.close.shift()
p[p <= 0] = np.nan
_psy['psy'] = p.rolling(n).count() / n * 100
return _psy
def wad(df, n=30):
"""
威廉聚散指标 WAD(30)
TRL=昨日收盘价与今日最低价中价格最低者;TRH=昨日收盘价与今日最高价中价格最高者
如果今日的收盘价>昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRL
如果今日的收盘价<昨日的收盘价,则今日的A/D=今日的收盘价-今日的TRH
如果今日的收盘价=昨日的收盘价,则今日的A/D=0
WAD=今日的A/D+昨日的WAD;MAWAD=WAD的M日简单移动平均
"""
def dmd(x):
if x.c > 0:
y = x.close - x.trl
elif x.c < 0:
y = x.close - x.trh
else:
y = 0
return y
_wad = pd.DataFrame()
_wad['date'] = df['date']
_ad = pd.DataFrame()
_ad['trl'] = np.minimum(df.low, df.close.shift(1))
_ad['trh'] = np.maximum(df.high, df.close.shift(1))
_ad['c'] = df.close - df.close.shift()
_ad['close'] = df.close
_ad['ad'] = _ad.apply(dmd, axis=1)
_wad['wad'] = _ad.ad.expanding(1).sum()
_wad['mawad'] = _ma(_wad.wad, n)
return _wad
def mfi(df, n=14):
"""
资金流向指标 mfi(14)
MF=TYP×成交量;TYP:当日中价((最高、最低、收盘价的均值)
如果当日TYP>昨日TYP,则将当日的MF值视为当日PMF值。而当日NMF值=0
如果当日TYP<=昨日TYP,则将当日的MF值视为当日NMF值。而当日PMF值=0
MR=∑PMF/∑NMF
MFI=100-(100÷(1+MR))
"""
_mfi = pd.DataFrame()
_mfi['date'] = df.date
_m = pd.DataFrame()
_m['typ'] = df[['high', 'low', 'close']].mean(axis=1)
_m['mf'] = _m.typ * df.volume
_m['typ_shift'] = _m.typ - _m.typ.shift(1)
_m['pmf'] = _m.apply(lambda x: x.mf if x.typ_shift > 0 else 0, axis=1)
_m['nmf'] = _m.apply(lambda x: x.mf if x.typ_shift <= 0 else 0, axis=1)
# _mfi['mfi'] = 100 - (100 / (1 + _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()))
_m['mr'] = _m.pmf.rolling(n).sum() / _m.nmf.rolling(n).sum()
    _mfi['mfi'] = 100 * _m.mr / (1 + _m.mr)  # note: Tonghuashun's documented formula differs from the one it actually uses
return _mfi
def pvt(df):
"""
pvt 量价趋势指标 pvt
如果设x=(今日收盘价—昨日收盘价)/昨日收盘价×当日成交量,
那么当日PVT指标值则为从第一个交易日起每日X值的累加。
"""
_pvt = | pd.DataFrame() | pandas.DataFrame |
import cv2
from PIL import Image
from pathlib import Path
import torch
from config import get_config
from api import face_recognize
from utils.utils import draw_box_name
import glob
import argparse
from tqdm import tqdm
import pandas as pd
import time
import os
if __name__ == '__main__':
base_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description="face recognition")
parser.add_argument('-image',type=str,help="-image path image")
# parser.add_argument('-path',type=str,help="-path path folder list image")
parser.add_argument('-csv',type=str,help="-path path to annotation.csv", default='%s/dataset/annotation.csv'%base_folder)
parser.add_argument('-path',type=str,help="-path path to image folder", default='%s/dataset/public_test'%base_folder)
parser.add_argument('-threshold', '--threshold',type=float,help="-threshold threshold", default=1.2)
parser.add_argument('-use_mtcnn', '--use_mtcnn',type=float,help="using mtcnn", default=1)
args = parser.parse_args()
conf = get_config(net_size = 'large', net_mode = 'ir_se',threshold = args.threshold, use_mtcnn = args.use_mtcnn)
face_recognize = face_recognize(conf)
targets , names = face_recognize.load_single_face(args.image)
submiter = [['image','x1','y1','x2','y2','result']]
sample_df = pd.read_csv(args.csv)
sample_list = list(sample_df.image)
for img in tqdm(sample_list):
temp = [img.split('/')[-1], 0, 0, 0, 0, 0]
for tp in ['.jpg', '.png', '.jpeg','.img', '.JPG', '.PNG', '.IMG', '.JPEG']:
img_path = '%s/%s%s'%(args.path, img, tp)
if os.path.isfile(img_path):
break
image = Image.open(img_path)
try:
bboxes, faces = face_recognize.align_multi(image)
except:
bboxes = []
faces = []
if len(bboxes) > 0:
bboxes = bboxes[:,:-1]
bboxes = bboxes.astype(int)
bboxes = bboxes + [-1,-1,1,1]
results, score, _ = face_recognize.infer(faces, targets)
for id,(re, sc) in enumerate(zip(results, score)):
if re != -1:
temp = [img.split('/')[-1].replace('.png', '.jpg'), bboxes[id][0], bboxes[id][1], bboxes[id][2], bboxes[id][3], 1]
print(img_path, results)
break
submiter.append(temp)
df = pd.DataFrame.from_records(submiter)
headers = df.iloc[0]
df = pd.DataFrame(df.values[1:], columns=headers)
from itertools import product
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from pandas import DataFrame
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.system.model import IV3SLS
from linearmodels.tests.system._utility import (
generate_3sls_data,
generate_3sls_data_v2,
simple_3sls,
)
nexog = [3, [1, 2, 3, 4, 5]]
nendog = [2, [1, 2, 1, 2, 1]]
ninstr = [3, 2, [2, 3, 2, 3, 2]]
const = [True, False]
rho = [0.8, 0.0]
common_exog = [True, False]
included_weights = [True, False]
output_dict = [True, False]
params = list(
product(
nexog, nendog, ninstr, const, rho, common_exog, included_weights, output_dict
)
)
nexog = [[0, 1, 2]]
nendog = [[1, 0, 1]]
ninstr = [[2, 0, 1]]
# Explicitly test variables that have no columns
add_params = list(
product(
nexog, nendog, ninstr, const, rho, common_exog, included_weights, output_dict
)
)
params += add_params
def gen_id(param):
idstr = "homo" if isinstance(param[0], list) else "hetero"
idstr += "-homo_endog" if isinstance(param[1], list) else "-hetero_endog"
idstr += "-homo_instr" if isinstance(param[2], list) else "-hetero_instr"
idstr += "-const" if param[3] else ""
idstr += "-correl" if param[4] != 0 else ""
idstr += "-common" if param[5] else ""
idstr += "-weights" if param[6] else ""
idstr += "-dict" if param[7] else "-tuple"
return idstr
ids = [gen_id(param) for param in params]
@pytest.fixture(params=params, ids=ids)
def data(request):
p, en, instr, const, rho, common_exog, included_weights, output_dict = request.param
list_like = isinstance(p, list) or isinstance(en, list) or isinstance(instr, list)
k = 4
if common_exog and list_like:
p = 3
en = 2
instr = 3
elif list_like:
def safe_len(a):
a = np.array(a)
if a.ndim == 0:
return 0
return len(a)
k = max(map(safe_len, [p, en, instr]))
return generate_3sls_data(
n=250,
k=k,
p=p,
en=en,
instr=instr,
const=const,
rho=rho,
common_exog=common_exog,
included_weights=included_weights,
output_dict=output_dict,
)
def test_direct_simple(data):
mod = IV3SLS(data)
res = mod.fit(cov_type="unadjusted")
y = []
x = []
z = []
for key in data:
val = data[key]
if isinstance(val, tuple):
y.append(val[0])
nobs = val[0].shape[0]
v1 = val[1] if val[1] is not None else np.empty((nobs, 0))
v2 = val[2] if val[2] is not None else np.empty((nobs, 0))
v3 = val[3] if val[3] is not None else np.empty((nobs, 0))
x.append(np.concatenate([v1, v2], 1))
z.append(np.concatenate([v1, v3], 1))
if len(val) == 5:
return # weighted
else:
y.append(val["dependent"])
nobs = val["dependent"].shape[0]
vexog = val["exog"] if val["exog"] is not None else np.empty((nobs, 0))
vendog = val["endog"] if val["endog"] is not None else np.empty((nobs, 0))
vinstr = (
val["instruments"]
if val["instruments"] is not None
else np.empty((nobs, 0))
)
x.append(np.concatenate([vexog, vendog], 1))
z.append(np.concatenate([vexog, vinstr], 1))
if "weights" in val:
return # weighted
out = simple_3sls(y, x, z)
assert_allclose(res.params.values, out.beta1.squeeze())
assert_allclose(res.sigma, out.sigma)
assert_allclose(np.asarray(res.resids), out.eps, atol=1e-4)
assert_allclose(np.diag(res.cov), np.diag(out.cov))
def test_single_equation(data):
key = list(data.keys())[0]
data = {key: data[key]}
mod = IV3SLS(data)
res = mod.fit(cov_type="unadjusted")
y = []
x = []
z = []
for key in data:
val = data[key]
if isinstance(val, tuple):
y.append(val[0])
x.append(np.concatenate([val[1], val[2]], 1))
z.append(np.concatenate([val[1], val[3]], 1))
if len(val) == 5:
return # weighted
else:
y.append(val["dependent"])
x.append(np.concatenate([val["exog"], val["endog"]], 1))
z.append(np.concatenate([val["exog"], val["instruments"]], 1))
if "weights" in val:
return # weighted
out = simple_3sls(y, x, z)
assert_allclose(res.params.values, out.beta1.squeeze())
assert_allclose(res.sigma, out.sigma)
assert_allclose(np.asarray(res.resids), out.eps)
assert_allclose(np.diag(res.cov), np.diag(out.cov))
def test_too_few_instruments():
n = 200
dep = np.random.standard_normal((n, 2))
exog = np.random.standard_normal((n, 3))
endog = np.random.standard_normal((n, 2))
instr = np.random.standard_normal((n, 1))
eqns = {}
for i in range(2):
eqns["eqn.{0}".format(i)] = (dep[:, i], exog, endog, instr)
with pytest.raises(ValueError):
IV3SLS(eqns)
def test_redundant_instruments():
n = 200
dep = np.random.standard_normal((n, 2))
exog = np.random.standard_normal((n, 3))
endog = np.random.standard_normal((n, 2))
instr = np.random.standard_normal((n, 1))
instr = np.concatenate([exog, instr], 1)
eqns = {}
for i in range(2):
eqns["eqn.{0}".format(i)] = (dep[:, i], exog, endog, instr)
with pytest.raises(ValueError):
IV3SLS(eqns)
def test_too_many_instruments():
n = 50
dep = np.random.standard_normal((n, 2))
exog = np.random.standard_normal((n, 3))
endog = np.random.standard_normal((n, 2))
instr = np.random.standard_normal((n, n + 1))
eqns = {}
for i in range(2):
eqns["eqn.{0}".format(i)] = (dep[:, i], exog, endog, instr)
with pytest.raises(ValueError):
IV3SLS(eqns)
def test_wrong_input_type():
n = 200
dep = np.random.standard_normal((n, 2))
exog = np.random.standard_normal((n, 3))
endog = np.random.standard_normal((n, 2))
instr = np.random.standard_normal((n, 1))
instr = np.concatenate([exog, instr], 1)
eqns = []
for i in range(2):
eqns.append((dep[:, i], exog, endog, instr))
with pytest.raises(TypeError):
IV3SLS(eqns)
eqns = {}
for i in range(2):
eqns[i] = (dep[:, i], exog, endog, instr)
with pytest.raises(ValueError):
IV3SLS(eqns)
def test_multivariate_iv():
n = 250
dep = np.random.standard_normal((n, 2))
exog = np.random.standard_normal((n, 3))
exog = DataFrame(exog, columns=["exog.{0}".format(i) for i in range(3)])
endog = np.random.standard_normal((n, 2))
endog = DataFrame(endog, columns=["endog.{0}".format(i) for i in range(2)])
instr = np.random.standard_normal((n, 3))
instr = DataFrame(instr, columns=["instr.{0}".format(i) for i in range(3)])
eqns = {}
for i in range(2):
eqns["dependent.{0}".format(i)] = (dep[:, i], exog, endog, instr)
mod = IV3SLS(eqns)
res = mod.fit()
common_mod = IV3SLS.multivariate_iv(dep, exog, endog, instr)
common_res = common_mod.fit()
assert_series_equal(res.params, common_res.params)
def test_multivariate_iv_bad_data():
n = 250
dep = np.random.standard_normal((n, 2))
instr = np.random.standard_normal((n, 3))
instr = DataFrame(instr, columns=["instr.{0}".format(i) for i in range(3)])
with pytest.raises(ValueError):
IV3SLS.multivariate_iv(dep, None, None, instr)
def test_fitted(data):
mod = IV3SLS(data)
res = mod.fit()
expected = []
for i, key in enumerate(res.equations):
eq = res.equations[key]
fv = res.fitted_values[key].copy()
fv.name = "fitted_values"
assert_series_equal(eq.fitted_values, fv)
b = eq.params.values
direct = mod._x[i] @ b
expected.append(direct[:, None])
assert_allclose(eq.fitted_values, direct, atol=1e-8)
expected = np.concatenate(expected, 1)
expected = DataFrame(
expected,
index=mod._dependent[i].pandas.index,
columns=[key for key in res.equations],
)
assert_frame_equal(expected, res.fitted_values)
def test_no_exog():
data = generate_3sls_data_v2(nexog=0, const=False)
mod = IV3SLS(data)
res = mod.fit()
data = generate_3sls_data_v2(nexog=0, const=False, omitted="drop")
mod = IV3SLS(data)
res2 = mod.fit()
data = generate_3sls_data_v2(nexog=0, const=False, omitted="empty")
mod = IV3SLS(data)
res3 = mod.fit()
data = generate_3sls_data_v2(nexog=0, const=False, output_dict=False)
mod = IV3SLS(data)
res4 = mod.fit()
data = generate_3sls_data_v2(
nexog=0, const=False, output_dict=False, omitted="empty"
)
mod = IV3SLS(data)
res5 = mod.fit()
assert_series_equal(res.params, res2.params)
assert_series_equal(res.params, res3.params)
assert_series_equal(res.params, res4.params)
assert_series_equal(res.params, res5.params)
import os
from unittest import mock
import pandas as pd
import pytest
from great_expectations.core import ExpectationSuite
from great_expectations.core.batch import Batch
from great_expectations.dataset import SqlAlchemyDataset
from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.validator.validator import Validator
from ruamel.yaml import YAML
yaml = YAML()
@pytest.fixture
def test_db_connection_string(tmp_path_factory, test_backends):
if "sqlite" not in test_backends:
pytest.skip("skipping fixture because sqlite not selected")
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
df2 = pd.DataFrame({"col_1": [0, 1, 2, 3, 4], "col_2": ["b", "c", "d", "e", "f"]})
import sqlalchemy as sa
basepath = str(tmp_path_factory.mktemp("db_context"))
path = os.path.join(basepath, "test.db")
engine = sa.create_engine("sqlite:///" + str(path))
df1.to_sql("table_1", con=engine, index=True)
df2.to_sql("table_2", con=engine, index=True, schema="main")
# Return a connection string to this newly-created db
return "sqlite:///" + str(path)
def test_sqlalchemy_datasource_custom_data_asset(
data_context, test_db_connection_string
):
name = "test_sqlalchemy_datasource"
class_name = "SqlAlchemyDatasource"
data_asset_type_config = {
"module_name": "custom_sqlalchemy_dataset",
"class_name": "CustomSqlAlchemyDataset",
}
data_context.add_datasource(
name,
class_name=class_name,
credentials={"connection_string": test_db_connection_string},
data_asset_type=data_asset_type_config,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
# We should now see updated configs
with open(
os.path.join(data_context.root_directory, "great_expectations.yml"), "r"
) as data_context_config_file:
data_context_file_config = yaml.load(data_context_config_file)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["module_name"]
== "custom_sqlalchemy_dataset"
)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["class_name"]
== "CustomSqlAlchemyDataset"
)
# We should be able to get a dataset of the correct type from the datasource.
data_context.create_expectation_suite("table_1.boo")
batch = data_context.get_batch(
data_context.build_batch_kwargs(
"test_sqlalchemy_datasource", "default", "table_1"
),
"table_1.boo",
)
assert type(batch).__name__ == "CustomSqlAlchemyDataset"
res = batch.expect_column_func_value_to_be("col_1", 1)
assert res.success is True
def test_standalone_sqlalchemy_datasource(test_db_connection_string, sa):
datasource = SqlAlchemyDatasource(
"SqlAlchemy",
connection_string=test_db_connection_string,
echo=False,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
assert set(datasource.get_available_data_asset_names()["default"]["names"]) == {
("main.table_1", "table"),
("main.table_2", "table"),
}
batch_kwargs = datasource.build_batch_kwargs("default", "main.table_1")
batch = datasource.get_batch(batch_kwargs=batch_kwargs)
assert isinstance(batch, Batch)
assert isinstance(batch.data, SqlAlchemyBatchReference)
dataset = SqlAlchemyDataset(**batch.data.get_init_kwargs())
assert len(dataset.head(10)) == 5
def test_create_sqlalchemy_datasource(data_context):
name = "test_sqlalchemy_datasource"
# type_ = "sqlalchemy"
class_name = "SqlAlchemyDatasource"
# Use sqlite so we don't require postgres for this test.
connection_kwargs = {"credentials": {"drivername": "sqlite"}}
# It should be possible to create a sqlalchemy source using these params without
# saving substitution variables
data_context.add_datasource(name, class_name=class_name, **connection_kwargs)
data_context_config = data_context.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
# We should be able to get it in this session even without saving the config
source = data_context.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
var_name = "test_sqlalchemy_datasource"
data_context.save_config_variable(var_name, connection_kwargs["credentials"])
# But we should be able to add a source using a substitution variable
name = "second_source"
data_context.add_datasource(
name, class_name=class_name, credentials="${" + var_name + "}"
)
data_context_config = data_context.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
assert (
data_context_config["datasources"][name]["credentials"] == "${" + var_name + "}"
)
source = data_context.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
# Finally, we should be able to confirm that the folder structure is as expected
with open(
os.path.join(data_context.root_directory, "uncommitted/config_variables.yml"),
"r",
) as credentials_file:
substitution_variables = yaml.load(credentials_file)
assert substitution_variables == {
var_name: dict(**connection_kwargs["credentials"])
}
def test_sqlalchemy_source_templating(sqlitedb_engine):
datasource = SqlAlchemyDatasource(
engine=sqlitedb_engine,
batch_kwargs_generators={"foo": {"class_name": "QueryBatchKwargsGenerator"}},
)
generator = datasource.get_batch_kwargs_generator("foo")
generator.add_query("test", "select 'cat' as ${col_name};")
batch = datasource.get_batch(
generator.build_batch_kwargs(
"test", query_parameters={"col_name": "animal_name"}
)
)
dataset = Validator(
batch,
expectation_suite=ExpectationSuite("test"),
expectation_engine=SqlAlchemyDataset,
).get_dataset()
res = dataset.expect_column_to_exist("animal_name")
assert res.success is True
res = dataset.expect_column_values_to_be_in_set("animal_name", ["cat"])
assert res.success is True
def test_sqlalchemy_source_limit(sqlitedb_engine):
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
import pandas
import os
from locale import *
import locale
locale.setlocale(LC_NUMERIC, '')
fs = pandas.read_csv('./Ares.csv', sep=';', encoding='cp1252', parse_dates=[1,3,5,7,9,11], dayfirst=True)
# Split into column pairs
materia_organica = pandas.DataFrame(fs[[fs.columns[0], fs.columns[1]]])
conductividad = pandas.DataFrame(fs[[fs.columns[2], fs.columns[3]]])
amonio = pandas.DataFrame(fs[[fs.columns[4], fs.columns[5]]])
solidos = pandas.DataFrame(fs[[fs.columns[6], fs.columns[7]]])
temperatura = pandas.DataFrame(fs[[fs.columns[8], fs.columns[9]]])
lluvias = pandas.DataFrame(fs[[fs.columns[10], fs.columns[11]]])
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def home():
percent=percentageMethod()
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
with open('percent.json') as f:
file2 = json.load(f)
labels=file2['index']
data=file2['data']
if "username" in session:
return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent,
month_index=month_index, month_data=month_data)
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
#Check if email already exists in the registratiion page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything Login (Routes to renderpage, check if username exist and also verifypassword through Jquery AJAX request)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
@app.route('/totalyear', methods=["GET"])
def total_year():
total_year=totalYear()
file1=pd.read_json('total_year.json',orient='index')
year_index=np.array(file1['year'])
year_data=np.array(file1['total'])
return render_template("total_year.html",year_index=year_index, year_data=year_data)
@app.route('/totalmonth', methods=["GET"])
def total_month():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
num=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
data=data1['total']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean Squared Error MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
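# Worked example for the error metrics above (made-up numbers, not app data): with y = 100 and
# yhat = np.array([90, 110]), e = [10, -10], so
#   test(100, np.array([90, 110]))  # -> mse = 100.0, rmse = 10.0, mae = 10.0, mape = 0.1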
@app.route('/totalmonth', methods=["POST"])
def total_month_num():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_month"))
predict_rs, fitted_data=predict(data.model_month,num)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
# Copyright 2021 ETH Zurich, Media Technology Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pickle
import implicit
import pandas as pd
from scipy import sparse
import os
import sys
sys.path.append(os.getcwd())
from preprocessing import load_data_cv, transform_horizontal_to_vertical, get_metadata
from evaluation import evaluate
import random
def pop_predict(model, grouped_train, N=10):
now = datetime.datetime.now()
prediction=model[:N]['article_id'].values.tolist()
predictions = pd.DataFrame(grouped_train)
predictions['predictions'] = pd.Series([prediction] * len(predictions) ,index=predictions.index)
predictions['read_articles'] = grouped_train
isnull = predictions['read_articles'].isnull()
if isnull.sum() > 0:
predictions.loc[isnull, 'read_articles'] = [[[]] * isnull.sum()]
predictions['predictions'] = [[article for article in x[1] if article not in x[0]] for x in zip(predictions['read_articles'], predictions['predictions'])]
print(f"Pop prediction done in {datetime.datetime.now() - now}")
return predictions
def random_predict(articles, grouped_train, N=10):
now = datetime.datetime.now()
predictions = pd.DataFrame(grouped_train)
import pandas as pd
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.decomposition import PCA
import jellyfish # for distance functions
from fuzzywuzzy import fuzz # for distance functions
import numpy as np # to process numeric arrays
# calculate the distance between two given strings
def get_distance(string_a, string_b):
# similarity scores given by edit distance functions are reversed to turn them into distances
lev = 1 - fuzz.ratio(string_a, string_b) / 100 # given value is normalized in range 1-100, not in 0-1
jar = 1 - jellyfish.jaro_distance(string_a, string_b)
jw = 1 - jellyfish.jaro_winkler(string_a, string_b)
score = (lev + jar + jw) / 3 # calculate mean value of all distances
return score
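# Illustrative behaviour (hypothetical strings, not from the input data): identical strings give
# get_distance('kitten', 'kitten') == 0.0 because all three similarity scores equal 1, a near
# duplicate such as ('jonathan', 'jonhatan') yields a small distance close to 0, and unrelated
# strings push the mean distance towards 1.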
# drop the duplicates from the given cluster; a tuple is dropped if its similarity score with another tuple
# with same label is above the given threshold
def drop_duplicates_threshold(dataset_cluster, threshold):
row_num = dataset_cluster.shape[0]
for a in range(0, row_num):
if a >= row_num:
break
row1 = dataset_cluster.iloc[a]
for b in range(0, row_num):
if a == b:
continue
if b >= row_num:
break
row2 = dataset_cluster.iloc[b]
sim_sum = 0
col_num = len(dataset_cluster.columns) - 1
for i in range(0, col_num):
sim_sum += 1-get_distance(str(row1[dataset_cluster.columns[i]]), str(row2[dataset_cluster.columns[i]]))
score = col_num - sim_sum
max_score = col_num - threshold*col_num
if score <= max_score:
dataset_cluster = dataset_cluster.drop(dataset_cluster.index[b])
row_num -= 1
b -= 1
# row_num = dataset.shape[0]
l = dataset_cluster.shape[0]
return dataset_cluster
# vectorize dataset values, turning each tuple into the feature values using the Bag of Word approach
# Hashing vectorizing implements "feature hashing" technique: instead of building a hash table of the features
# encountered in training, as the vectorizers do, a hash function is applied to the features to determine their
# column index in sample matrices directly. It uses BoW for the initial feature extraction, but using the hashing
# trick allows to greatly optimize the performance, which makes this approach the best candidate to be used in the
# implementation of clustering workflow.
def vectorize_dataset(dataset):
feature_matrix = []
# define vectorizer
vectorizer = HashingVectorizer(n_features=dataset.shape[1]*2)
# iterate through all rows in the dataset
for i in range(0, dataset.shape[0]):
# extract row values
row_values = list(dataset.iloc[i].astype(str))
# vectorize the row
vector = vectorizer.transform(row_values)
# transform the created feature matrix from sparse to dense form
dense_vector = vector.todense()
# flatten the feature matrix, turning it into a single row
dense_vector = np.array(dense_vector)
flatten_vector = np.ndarray.flatten(dense_vector)
# add feature row to the dataset feature matrix
feature_matrix.append(list(flatten_vector))
return feature_matrix
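# Shape sketch (toy frame, assumed for illustration): for a 4-column DataFrame the vectorizer is
# built with n_features = 4 * 2 = 8, each cell hashes to a length-8 vector and each row flattens
# to 4 * 8 = 32 features, e.g.
#   toy = pd.DataFrame({'name': ['anna', 'anne'], 'city': ['rome', 'roma'],
#                       'street': ['main', 'maine'], 'zip': ['1001', '1002']})
#   feats = vectorize_dataset(toy)   # len(feats) == 2, len(feats[0]) == 32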
# cluster the vectorized dataset using the Mean Shift algorithm
# The algorithms starts from initializing random seed and choosing the size of the window; the "center of mass" is
# detected by calculating the mean, and the search window is shifted towards this center; the process is repeated until
# convergence. For the clustering, the whole space is tessellated with the search windows, and all the point which are
# in the attraction basin (region where all trajectories lead to the same mode) belong to the same cluster
#
# prior to the clustering, the dimensionality of feature matrix is reduced using conventional PCA
# It identifies an "optimal" data projection to low-dimensional space, maximizing the data variance by finding new
# coordinate system. The method is based on the solving of the eigenvalue problem for the covariance matrix of the
# data (either calculating it directly or using Singular Value Decomposition in cases where the number of dimension
# is far greater than the number of data examples). The solution produces the eigenvectors and eigenvalues; the
# eigenvectors are ranked according to their eigenvalues (in the descending order of the eigenvalues), and top-N
# (called principal components) are chosen to be the coordinate axis of the new low-dimensional space. The matrix
# composed of these chosen eigenvectors is used on the data to transform it from high-dimensional to low-dimensional
# space.
def cluster_data(dataset, pca_comp, shift_quantile):
cluster_data = vectorize_dataset(dataset)
pca = PCA(n_components=pca_comp)
pca.fit(cluster_data)
pca_data = pca.transform(cluster_data)
bandwidth = estimate_bandwidth(pca_data, quantile=shift_quantile, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(cluster_data)
labels = ms.labels_
label_df = pd.DataFrame(labels)
new_data = dataset.join(label_df)
label_col = list(new_data.columns)[len(new_data.columns)-1]
new_data = new_data.rename(columns={label_col: 'label'})
new_data.sort_values(by='label')
return new_data
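# Usage sketch (parameter values are assumptions, not tuned defaults):
#   labelled = cluster_data(records_df, pca_comp=5, shift_quantile=0.3)
# returns the original columns plus a 'label' column; drop_duplicates_clusters below then walks
# each label group and applies drop_duplicates_threshold with a chosen similarity threshold,
# e.g. 0.85.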
# drop the duplicates from the given clustered dataset, using the given distance threshold value
def drop_duplicates_clusters(dataset, distance_threshold):
# extract labels, i.e. extract the clusters
labels_unique = np.unique(dataset['label'])
new_dataset = pd.DataFrame(columns=dataset.columns)
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import prot.viz
import prot.size as size
colors, palette = prot.viz.bokeh_theme()
dataset_colors = {'li_2014':colors['purple'], 'schmidt_2016':colors['light_blue'],
'peebo_2015':colors['green'], 'valgepea_2013':colors['red']}
prot.viz.plotting_style()
######################
# plot configuration #
######################
fig = plt.figure(constrained_layout=True)
widths = [6, 7]
heights = [2, 0.5, 0.1, 0.65]
spec = fig.add_gridspec(ncols=2, nrows=4, width_ratios=widths,
height_ratios=heights)
ax1 = fig.add_subplot(spec[0, 0])
# ax2 = fig.add_subplot(spec[0, 1])
ax3 = fig.add_subplot(spec[2:, 0])
ax4 = fig.add_subplot(spec[:3, 1])
# Parameters and calculations #
###############################
# use estimates of E. coli rod cell size based
# on the values used from Si et al. (2017,2019);
# Use our fit of that data as fcn of growth rate
# to determine cell size, length, width, and surface area.
# gr = np.linspace(0.01, 4.4, 100)
V = np.linspace(0.5,50, 500)
# V = size.lambda2size(gr)
# print(V)
# w = size.lambda2width(gr)
# l = size.lambda2length(gr)
# SA_rod = size.rod_SA(l, w, V)
SA_rod = 2 * np.pi * V**(2/3)
SA_V_ratio_rod = SA_rod / V
# Sphere V = (4/3) pi r**3
# Sphere SA = 4 pi r**2
SA_sphere = V**(2/3) * ((4/3) * np.pi)**(-2/3) * 4 * np.pi
SA_V_ratio_sphere = SA_sphere / V
# ATP equivalents demand w.r.t. volume ; 1E6 ATP/(um3 s)
Pv = 1E6 * V
# ATP max - half surface area devoted to respiration
Ps_um_resp = ((3)/ (1E-6))
Ps_resp_rod = Ps_um_resp * SA_rod * 0.5
Ps_resp_sphere = Ps_um_resp * SA_sphere * 0.5
# # ATP max - half surface area devoted to fermentation
# Ps_um_ferm= ((180*2)/ (50E-6))
# Ps_ferm_rod = Ps_um_ferm * SA_rod * 0.5
# Ps_ferm_sphere = Ps_um_ferm * SA_sphere * 0.5
#### for Fill_between, need common x-axis array
SA_ = np.linspace(np.min(SA_sphere), np.max(SA_rod), 500)
V_sphere = (SA_/(((4/3) * np.pi)**(-2/3) * 4 * np.pi))**(3/2)
SA_V_ratio_sphere_ = SA_/V_sphere
V_rod = (SA_/(2 * np.pi))**(3/2)
SA_V_ratio_rod_ = SA_/V_rod
# ATP max - half surface area devoted to respiration
Ps_um_resp = ((3)/ (1E-6))
Ps_resp_ = Ps_um_resp * SA_ * 0.5
######################
# Plot 1, S/V scaling #
######################
ax1.plot(Pv, SA_V_ratio_rod, color=colors['dark_green'], label='rod',
alpha=0.9, lw = 0.5, ls = '-.')
ax1.plot(Pv, SA_V_ratio_sphere, color=colors['dark_green'], label='sphere',
alpha=0.9, lw = 0.5, ls = '--')
ax1.fill_between(Pv, y1 = SA_V_ratio_sphere, y2=SA_V_ratio_rod,
color=colors['dark_green'],alpha=0.2, lw = 0)
# plot the max for respiration
ax1.plot(Ps_resp_rod, SA_V_ratio_rod, color=colors['blue'],
label='rod', alpha=0.9, lw = 0.5, ls = '-.')
ax1.plot(Ps_resp_sphere, SA_V_ratio_sphere, color=colors['blue'],
label='sphere', alpha=0.9, lw = 0.5, ls = '--')
ax1.fill_between(Ps_resp_, y1 = SA_V_ratio_sphere_, y2 = SA_V_ratio_rod_,
color=colors['blue'],alpha=0.2, lw = 0)
# # Populate second plot with growth rates
# S/V for E. coli datasets
# Load the data set
data = pd.read_csv('../../data/compiled_absolute_measurements.csv')
for g, d in data.groupby(['dataset', 'condition', 'growth_rate_hr']):
V = size.lambda2size(g[2])
# ATP equivalents demand w.r.t. volume ; 1E6 ATP/(um3 s)
Pv = 1E6 * V
# assume aspect ratio of 4 (length/width), which is
# appoximately correct for E. coli
SA_rod = 2 * np.pi * V**(2/3)
SV = SA_rod/V
ax1.plot(Pv, SV, 'o', color=dataset_colors[g[0]],
alpha=0.75, markeredgecolor='k', markeredgewidth=0.25,
label = g[2], ms=4, zorder=10)
# Format the axes
for a in [ax1]:#,ax2]:
a.xaxis.set_tick_params(labelsize=5)
a.yaxis.set_tick_params(labelsize=5)
a.set_xscale('log')
a.set_ylim([1.5, 8.0])
# a.legend(fontsize=5, loc='lower right')
ax1.set_xlim([np.min(Pv), np.max(Ps_resp_)])
ax1.set_xlabel('ATP equivalents per s', fontsize=6)
ax1.set_ylabel('S/V ratio [$\mu$m$^{-1}$]', fontsize=6)
legend_elements = [Line2D([0], [0], marker='o', color='gray', lw = 0,
alpha=0.5, markeredgecolor='k', markeredgewidth=0.25,
label = 'estimated for\n$E. coli$ data', ms=4),
Line2D([0], [0], ls = '-.', color='gray', label='rod (aspect\nratio = 4)',
alpha = 1.0, lw = 0.5, markeredgewidth=0.0, markeredgecolor='k'),
Line2D([0], [0], ls = '--', color='gray', label='sphere',
alpha = 1.0, lw = 0.5, markeredgewidth=0.0, markeredgecolor='k')]
legend = ax1.legend(handles = legend_elements, loc = 'upper right',
fontsize = 5)
ax1.add_artist(legend)
######################
# Plot 2
######################
# total fg per cell of inner membrane proteins, GO:0005886
data_membrane = data[data.go_terms.str.contains('GO:0005886')]
data_membrane_fg_summary = pd.DataFrame()
""" Methods to run Auto-WEKA """
import pathlib
import time, warnings, os, math, gc
import pandas as pd
import numpy as np
import psutil
from psutil import virtual_memory
from autogluon.utils.tabular.utils.loaders import load_pd
from autogluon.utils.tabular.ml.constants import BINARY, MULTICLASS, REGRESSION
from autogluon import TabularPrediction as task
from ..process_data import processData
from .csv2arff import Csv2Arff
def autoweka_fit_predict(train_data, test_data, label_column, problem_type, output_directory,
autoweka_path=None, eval_metric = None, runtime_sec = 60, random_state = 0, num_cores = 1):
""" Specify different output_directory for each run.
Args:
autoweka_path : str
Folder containing lib/autoweka/autoweka.jar installed during execution
of autoweka/setup.sh (must end with / character).
Returns: tuple (num_models_trained, num_models_ensemble, fit_time, y_pred, y_prob, predict_time, class_order)
where num_models_trained = num_models_ensemble = None,
class_order indicates the ordering of classes corresponding to columns of y_prob (2D numpy array of predicted probabilties),
y_pred = pandas Series of predicted classes for test data.
"""
# if problem_type == REGRESSION:
# raise NotImplementedError('Regression is not supported yet')
if autoweka_path is None:
working_directory = str(pathlib.Path().absolute())
autoweka_abs_path = str(pathlib.Path(__file__).parent.absolute())
autoweka_path = str(os.path.relpath(autoweka_abs_path, working_directory)) + '/'
# First need to ensure unique labels appear in test data in same order as in training data:
print("original train_data[label_column]: ", train_data[label_column]) # Replace test labels with dummies
test_data, dummy_class = dummy_test_labels(train_data, test_data, label_column, problem_type)
train_data, test_data, class_prefix, labels_are_int = ag_preprocess(train_data, test_data, label_column,
problem_type, eval_metric)
""" Use this instead of processData if you prefer to run on raw data.
# However is very error-prone, eg. will error if there are new feature categories at test-time.
# Weka to requires target as the last attribute:
if train_data.columns[-1] != label_column:
y_train = train_data[label_column]
train_data.drop([label_column], axis=1, inplace=True)
train_data[label_column] = y_train
if test_data.columns[-1] != label_column:
y_test = test_data[label_column]
test_data.drop([label_column], axis=1, inplace=True)
test_data[label_column] = y_test
"""
class_order, train_file_path, test_file_path = data_to_file(train_data, test_data, output_directory,
label_column, problem_type)
fit_predict_time, weka_file, weka_model_file, cmd_root = autoweka_fit(train_file_path=train_file_path,
test_file_path=test_file_path, eval_metric=eval_metric, autoweka_path=autoweka_path,
output_directory=output_directory, num_cores=num_cores, runtime_sec=runtime_sec, random_state=random_state)
y_pred, y_prob = get_predictions(problem_type=problem_type, weka_file=weka_file,
class_prefix=class_prefix, labels_are_int=labels_are_int,
eval_metric=eval_metric)
fit_time, predict_time = time_predictions(fit_predict_time=fit_predict_time, test_file_path=test_file_path,
weka_model_file=weka_model_file, cmd_root=cmd_root)
num_models_ensemble = None
num_models_trained = None # TODO: hard to get these
if class_order is not None and len(class_order) > 0:
if class_order[0].startswith(class_prefix):
class_order = [clss[len(class_prefix):] for clss in class_order]
if labels_are_int:
print("converting classes back to int")
class_order = [int(clss) for clss in class_order]
return (num_models_trained, num_models_ensemble, fit_time, y_pred, y_prob, predict_time, class_order)
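# Hedged usage sketch (paths, label column and metric are placeholders, not benchmark settings):
#   train_df = pd.read_csv('train.csv'); test_df = pd.read_csv('test.csv')
#   (_, _, fit_time, y_pred, y_prob, predict_time, class_order) = autoweka_fit_predict(
#       train_df, test_df, label_column='class', problem_type=BINARY,
#       output_directory='./autoweka_run0/', eval_metric='roc_auc',
#       runtime_sec=600, num_cores=4)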
def dummy_test_labels(train_data, test_data, label_column, problem_type):
""" Returns copy of test data with dummy test labels for use with auto-weka """
print("Applying dummy_test_labels...")
train_label_subset = train_data[label_column].iloc[:len(test_data)].copy()
dummy_class = train_data[label_column].iloc[0] # placeholder class to use for imputing.
row = 0
while pd.isnull(dummy_class):
row += 1
if row >= len(train_data):
raise ValueError("All training labels are missing")
dummy_class = train_data[label_column].iloc[row].copy()
if len(train_label_subset) < len(test_data):
num_extra = len(test_data) - len(train_label_subset)
extra_labels = pd.Series([dummy_class] * num_extra)
train_label_subset = pd.concat((train_label_subset, extra_labels))
# coding: utf-8
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import random
import seaborn as sns
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import glob, os
import errno
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
import warnings
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.svm import SVC
from sklearn import svm
from sklearn.model_selection import GridSearchCV
path = r'D:\ml\BAtestdata'
files = glob.glob(os.path.join(path, "*.csv"))
df_ba = []
nr_files = len(files)
print(nr_files)
for i in range(len(files)):
file = files[i]
df_ba.append(pd.read_csv(file,index_col=0))
print(files)
collected_data = pd.read_csv('DatasetThesis.csv', index_col=0)
import pandas as pd
from xgboost import XGBClassifier
import data_transformation as dt
import feature_engineering as fe
import pickle
import matplotlib.pyplot as plt
import numpy as np
import xgboost as xgb
from pathlib import Path
from testing.predictiveAccuracy import metric
from testing.predictiveAccuracy import custom_scorer_metric
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV
from sklearn.metrics import make_scorer
from sklearn import linear_model
load = False
##############
# xgboost #
##############
clean_path = Path(Path(__file__).parent.absolute(), 'data', 'clean_data.csv')
if load:
df = pd.read_csv(clean_path)
df.Date = pd.to_datetime(df.Date, format='%Y-%m-%d')
import pandas as pd
import datatable as dt
import zipfile
import re
import os
import time
from datetime import timedelta
import sys
def pd_options():
desired_width = 300
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', None)
import warnings
warnings.filterwarnings('ignore')
def directory(directory_path):
"""Puts you in the right directory. Gives you list of files in path"""
os.chdir(re.findall("^(.*[\\\/])", directory_path)[0])
csv_files = os.listdir(directory_path)
return csv_files
def get_csv_names_from_list(paths):
if not isinstance(paths, list):
raise TypeError('We need a list of csv file paths here')
dfs = []
for i in paths:
if i.endswith('.csv'):
df_name = re.findall("\w+(?=\.)", i)[0]
dfs.append(df_name)
print(str(",".join(dfs)))
print(str(".shape,".join(dfs)), ".shape", sep='')
def read_data(path_ending_with_filename=None, return_df=False, method=None, dataframes=None):
"""
e.g.
read_data(path)
sample_submission, test, train = read_data(path, True)
---
Reads single csv or list of csvs or csvs in zip.
Available methods:
'dt' = Datatable fread
TODO: Add to read methods. i.e., parquet, pickle, arrow, etc.
"""
dt.options.progress.enabled = True
if isinstance(path_ending_with_filename, str):
if path_ending_with_filename.endswith('.zip'):
zf = zipfile.ZipFile(path_ending_with_filename)
if dataframes:
dataframes = [x.strip(" ") for x in dataframes.split(",")]
if len(dataframes) == 1:
x = dataframes[0] + '.csv'
dfs = {}
start_time = time.monotonic()
if method == 'dt':
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
else:
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
keys = list(dfs.keys())
values = list(dfs.values())
for i, k in enumerate(dfs):
print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
f"{values[i].shape[1]:,}", ")",
sep="")
if return_df:
return pd.DataFrame.from_dict(values[0])
else:
files = [x + '.csv' for x in dataframes]
else:
files = zf.namelist()
if return_df:
dfs = {}
start_time = time.monotonic()
for x in files:
if x.endswith('.csv'):
if method == 'dt':
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = dt.fread(zf.open(x)).to_pandas()
else:
dfs["{0}".format(re.findall("\w+(?=\.)", x)[0])] = pd.read_csv(zf.open(x))
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
keys = list(dfs.keys())
values = list(dfs.values())
for i, k in enumerate(dfs):
print(i + 1, ".", " ", k, " ", "=", " ", "(", f"{values[i].shape[0]:,}", " ", ":", " ",
f"{values[i].shape[1]:,}", ")",
sep="")
return dfs.values()
else:
if not dataframes:
csv_file_names = [format(re.findall("\w+(?=\.)", zf.namelist()[i])[0]) for i in
range(len(zf.namelist())) if zf.namelist()[i].endswith('.csv')]
# if dataframes:
#
# file_pos = [i for i, x in enumerate(csv_file_names)]
# else:
file_pos = [i for i, x in enumerate(zf.namelist()) if x.endswith('.csv')]
uncompressed_dir = [f"{(zf.filelist[i].file_size / 1024 ** 2):.2f} Mb" for i in file_pos]
compressed = [f"{(zf.filelist[i].compress_size / 1024 ** 2):.2f} Mb" for i in file_pos]
print(pd.concat([pd.Series(csv_file_names), pd.Series(uncompressed_dir), pd.Series(compressed)], axis=1))
import numpy as np
import pandas as pd
import json as json_package
import requests
from bs4 import BeautifulSoup
# import json
import logging
# Add and format time stamp in logging messages
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=logging.INFO,
datefmt="%c",
)
# Mute numexpr threads info
logging.getLogger("numexpr").setLevel(logging.WARNING)
# Custom functions
from .utils import rest_query, get_uniprot_info, wrap_cols_func
# Constants
from .constants import ENSEMBL_REST_API, UNIPROT_REST_API, NCBI_URL
## gget info
def info(ens_ids, wrap_text=False, expand=False, json=False, verbose=True, save=False):
"""
Fetch gene and transcript metadata using Ensembl IDs.
Args:
- ens_ids One or more Ensembl IDs to look up (string or list of strings).
Also supports WormBase and Flybase IDs.
- wrap_text If True, displays data frame with wrapped text for easy reading. Default: False.
- json If True, returns results in json/dictionary format instead of data frame. Default: False.
- verbose True/False whether to print progress information. Default True.
- save True/False wether to save csv with query results in current working directory. Default: False.
Returns a data frame containing the requested information.
Deprecated arguments: 'expand' (gget info now always returns all of the available information)
"""
# Handle deprecated arguments
if expand:
logging.info(
"'expand' argument deprecated! gget info now always returns all of the available information."
)
# Define Ensembl REST API server
server = ENSEMBL_REST_API
# Define type of returned content from REST
content_type = "application/json"
## Clean up Ensembl IDs
# If single Ensembl ID passed as string, convert to list
if type(ens_ids) == str:
ens_ids = [ens_ids]
# Remove Ensembl ID version if passed
ens_ids_clean = []
temp = 0
for ensembl_ID in ens_ids:
# But only for Ensembl ID (and not for flybase/wormbase IDs)
if ensembl_ID.startswith("ENS"):
ens_ids_clean.append(ensembl_ID.split(".")[0])
if "." in ensembl_ID and temp == 0:
if verbose is True:
logging.info(
"We noticed that you may have passed a version number with your Ensembl ID.\n"
"Please note that gget info will always return information linked to the latest Ensembl ID version (see 'ensembl_id')."
)
temp = +1
else:
ens_ids_clean.append(ensembl_ID)
# Remove duplicates in the Ensembl ID list without changing their order
ens_ids_clean = sorted(set(ens_ids_clean), key=ens_ids_clean.index)
# Create second clean list of Ensembl IDs which will not include IDs that were not found
ens_ids_clean_2 = ens_ids_clean.copy()
# Initiate dictionary to save results for all IDs in
master_dict = {}
# Query REST APIs from https://rest.ensembl.org/
for ensembl_ID in ens_ids_clean:
# Create dict to save query results
results_dict = {ensembl_ID: {}}
# Define the REST query
query = "lookup/id/" + ensembl_ID + "?" + "expand=1"
# Submit query
try:
df_temp = rest_query(server, query, content_type)
try:
# Add Ensembl ID with latest version number to df_temp
ensembl_id_dict = {
"ensembl_id": str(df_temp["id"]) + "." + str(df_temp["version"])
}
df_temp.update(ensembl_id_dict)
except KeyError:
# Just add Ensembl ID if no version found
ensembl_id_dict = {"ensembl_id": str(df_temp["id"])}
df_temp.update(ensembl_id_dict)
# If query returns in an error:
except RuntimeError:
# Try submitting query without expand (expand does not work for exons and translation IDs)
try:
query = "lookup/id/" + ensembl_ID + "?"
df_temp = rest_query(server, query, content_type)
# Add Ensembl ID with latest version number to df_temp
ensembl_id_dict = {
"ensembl_id": str(df_temp["id"]) + "." + str(df_temp["version"])
}
df_temp.update(ensembl_id_dict)
# Log error if this also did not work
except RuntimeError:
if verbose is True:
logging.warning(
f"ID '{ensembl_ID}' not found. Please double-check spelling/arguments and try again."
)
# Remove IDs that were not found from ID list
ens_ids_clean_2.remove(ensembl_ID)
continue
# Add results to master dict
results_dict[ensembl_ID].update(df_temp)
master_dict.update(results_dict)
# Return None if none of the Ensembl IDs were found
if len(master_dict) == 0:
return None
## Build data frame from dictionary
df = pd.DataFrame.from_dict(master_dict)
# Rename indeces
df = df.rename(
index={
"description": "ensembl_description",
"Parent": "parent_gene",
"display_name": "ensembl_gene_name",
}
)
## For genes and transcripts, get gene names and descriptions from UniProt
df_temp = pd.DataFrame()
for ens_id, id_type in zip(ens_ids_clean_2, df.loc["object_type"].values):
if id_type == "Gene" or id_type == "Transcript":
# Check if this is a wrombase ID:
if ens_id.startswith("WB"):
df_uniprot = get_uniprot_info(
UNIPROT_REST_API, ens_id, id_type="WB_Gene", verbose=verbose
)
elif ens_id.startswith("T"):
df_uniprot = get_uniprot_info(
UNIPROT_REST_API,
".".join(
ens_id.split(".")[:-1]
), # Remove the version number from WormBase TRS IDs for submission to UniProt
id_type="WB_Transcript",
verbose=verbose,
)
# Check if this is a flybase ID:
elif ens_id.startswith("FB"):
if id_type == "Gene":
df_uniprot = get_uniprot_info(
UNIPROT_REST_API, ens_id, id_type="FB_Gene", verbose=verbose
)
else:
df_uniprot = get_uniprot_info(
UNIPROT_REST_API,
ens_id,
id_type="FB_Transcript",
verbose=verbose,
)
else:
df_uniprot = get_uniprot_info(
UNIPROT_REST_API, ens_id, id_type=id_type, verbose=verbose
)
if not isinstance(df_uniprot, type(None)):
# If two different UniProt IDs for a single query ID are returned, they should be merged into one column
# So len(df_uniprot) should always be 1
if len(df_uniprot) > 1:
# If the above somehow failed, we will only return the first result.
df_uniprot = df_uniprot.iloc[[0]]
if verbose is True:
logging.warning(
f"More than one UniProt match was found for ID {ens_id}. Only the first match and its associated information will be returned."
)
else:
if verbose is True:
logging.warning(f"No UniProt entry was found for ID {ens_id}.")
## Get NCBI gene ID and description (for genes only)
url = NCBI_URL + f"/gene/?term={ens_id}"
html = requests.get(url)
# Raise error if status code not "OK" Response
if html.status_code != 200:
raise RuntimeError(
f"NCBI returned error status code {html.status_code}. Please try again."
)
## Web scrape NCBI website for gene ID, synonyms and description
soup = BeautifulSoup(html.text, "html.parser")
# Check if NCBI gene ID is available
try:
ncbi_gene_id = soup.find("input", {"id": "gene-id-value"}).get("value")
except:
ncbi_gene_id = np.nan
# Check if NCBI description is available
try:
ncbi_description = (
soup.find("div", class_="section", id="summaryDiv")
.find("dt", text="Summary")
.find_next_sibling("dd")
.text
)
except:
ncbi_description = np.nan
# Check if NCBI synonyms are available
try:
ncbi_synonyms = (
soup.find("div", class_="section", id="summaryDiv")
.find("dt", text="Also known as")
.find_next_sibling("dd")
.text
)
# Split NCBI synonyms
ncbi_synonyms = ncbi_synonyms.split("; ")
except:
ncbi_synonyms = None
# If both NCBI and UniProt synonyms available,
# final synonyms list will be combined a set of both lists
if ncbi_synonyms is not None and not isinstance(df_uniprot, type(None)):
# Collect and flatten UniProt synonyms
uni_synonyms = df_uniprot["uni_synonyms"].values[0]
synonyms = list(set().union(uni_synonyms, ncbi_synonyms))
# Add only UniProt synonyms if NCBI syns not available
elif ncbi_synonyms is None and not isinstance(df_uniprot, type(None)):
synonyms = df_uniprot["uni_synonyms"].values[0]
else:
synonyms = np.nan
# Sort synonyms alphabetically (is sortable)
try:
synonyms = sorted(synonyms)
except:
None
# Save NCBI info to data frame
df_ncbi = pd.DataFrame(
{
"ncbi_gene_id": [ncbi_gene_id],
"ncbi_description": [ncbi_description],
"synonyms": [synonyms],
},
)
# Transpost NCBI df and add Ensembl ID as column name
df_ncbi = df_ncbi.T
df_ncbi.columns = [ens_id]
## Add NCBI and UniProt info to data frame
if not isinstance(df_uniprot, type(None)):
# Transpose UniProt data frame and add Ensembl ID as column name
df_uniprot = df_uniprot.T
df_uniprot.columns = [ens_id]
# Combine Ensembl and NCBI info
df_uni_ncbi = pd.concat([df_uniprot, df_ncbi])
# Append NCBI and UniProt info to df_temp
df_temp = pd.concat([df_temp, df_uni_ncbi], axis=1)
else:
# Add only NCBI info to df_temp
df_temp = pd.concat([df_temp, df_ncbi], axis=1)
# Append UniProt and NCBI info to df
df = pd.concat([df, df_temp])
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
import re
import io
import ast
import requests
import numpy as np
import pandas as pd
import random
import multiprocessing
import threading, queue
from decimal import Decimal
from ttictoc import tic, toc
from typing import Any, Text, Dict, List, Union, Optional
from rasa_sdk import Action, Tracker
from rasa_sdk import FormValidationAction
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.types import DomainDict
from rasa_sdk.executor import CollectingDispatcher
import warnings
from statistics import mean
from os import path, getenv
from datetime import datetime
import matplotlib.pyplot as plt
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError
import boto3
from sqlalchemy import create_engine
import sqlalchemy.types as sql_types
DB_AWS_ACCESS_KEY_ID = getenv('DB_AWS_ACCESS_KEY_ID')
DB_AWS_SECRET_ACCESS_KEY = getenv('DB_AWS_SECRET_ACCESS_KEY')
DB_AWS_BUCKET = 'journeypic'
# ------------------------------------------------------------------
def connect_to_server(params_dict, logging_func=print, debug=False):
connit = params_dict['connit_type'] + '://' + params_dict['connit_user'] + ':' \
+ params_dict['connit_pass'] + '@' \
+ params_dict['connit_host'] + ':' \
+ params_dict['connit_port'] + '/' \
+ params_dict['connit_db']
if debug:
logging_func(connit)
sql_engine = create_engine(connit, echo=False)
try:
sql_engine.connect()
logging_func("Connected Successfully")
except Exception as e:
logging_func("Error connecting to SQL server!\n\n%s\n" % str(e))
raise (e)
return sql_engine
def read_table(sql_engine, sql_query, logging_func=print, debug=False):
df = pd.read_sql(sql_query, sql_engine)
if debug:
match1 = re.search('FROM (.*) ORDER', sql_query)
match2 = re.search('FROM (.*) LIMIT', sql_query)
table_name = "Data"
if match1:
table_name = match1.group(1)
elif match2:
table_name = match2.group(1)
logging_func('\n%s %s:' % (table_name, str(df.shape)))
logging_func(df.head().to_string())
return df
# ------------------------------------------------------------------
def res_timer(res, tracker):
timer_state = tracker.get_slot('timer_state') if tracker.get_slot('timer_state') else 'n/a'
if timer_state == 'on':
res += '\nElapsed time: %.2f sec' % toc()
return res
# ------------------------------------------------------------------
def res_error(res, tracker, e):
timer_state = tracker.get_slot('timer_state') if tracker.get_slot('timer_state') else 'n/a'
if timer_state == 'on':
res += '\nERROR: %s' % e
return res
# ------------------------------------------------------------------
def simpleQuestionAnswer(tracker, entity, db_dict, user_intent=""):
lut_df = db_dict['lut']
custom_df = db_dict['nutrients_qna']
feature = lut_df['Entity'][entity]
try:
if feature in custom_df.index:
res = custom_df.loc[feature][user_intent]
else:
res = custom_df[[str(s) in feature for s in custom_df.index.tolist()]][user_intent][0]
if 'slot#' in res:
res_list = res.split(' ')
for k, el in enumerate(res_list):
if 'slot#' in el:
res_list[k] = str(tracker.get_slot(el.split('#')[1]))
res = ' '.join(res_list)
res_list = re.findall('\{.*?\}', res)
for match in res_list:
res = res.replace(match, str(eval(match[1:-1])))
except:
res = "אין לי מושג, מצטער!"
return res
def checkPrecentinres(title, x):
precent_position = None
listNumbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
if 'אחוז' in title:
precent_position = title.find('אחוז')
if '%' in title:
precent_position = title.find('%')
if precent_position is not None:
if title[precent_position - 2] == '0' and title[precent_position - 3] not in listNumbers:
title = title[:title.find(x)]
title += x
return title
# ------------------------------------------------------------------
def upload_file_to_s3(local_file, s3_folder, s3_file, aws_access_key_id, aws_secret_access_key, aws_bucket,
debug_en=False):
""" upload a given file to given location on Amazon-S3 """
success = True
HTTP_OK = 200
# Connect to Amazon-S3 client:
s3_client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
# Make a new directory on S3 (if not already exists):
if s3_folder + '/' in [x['Key'] for x in s3_client.list_objects(Bucket=aws_bucket)['Contents']]:
pass
elif not debug_en:
res = s3_client.put_object(Bucket=aws_bucket, Key='%s/' % s3_folder)
success = res['ResponseMetadata']['HTTPStatusCode'] == HTTP_OK
if not success:
return success, ""
# Upload local_file to S3:
x = 3
if not debug_en:
try:
if path.exists(local_file):
s3_client.upload_file(local_file, aws_bucket, path.join(s3_folder, s3_file))
s3_client.put_object_acl(ACL='public-read', Bucket=aws_bucket, Key=path.join(s3_folder, s3_file))
except (ClientError, S3UploadFailedError) as e:
success = False, ""
return success, "https://%s.s3.eu-central-1.amazonaws.com/%s/%s" % (aws_bucket, s3_folder, s3_file)
# ------------------------------------------------------------------
def donut_generator(names, sizes, radius=0.7, textstr_title='',
colors=None, figname="image.png"):
CARBS_GRAMS_CALOIRES = 4
PROTEIN_GRAMS_CALOIRES = 4
FAT_GRAMS_CALOIRES = 9
if colors is None:
colors = []
my_circle = plt.Circle((0, 0), radius, color='white')
fig, ax = plt.subplots()
labels = [':' + k1 + '\nםרג ' + str(round(k2, 2)) for k1, k2 in zip(names, sizes)]
if colors:
ax.pie(sizes, colors=colors)
else:
ax.pie(sizes)
plt.legend(bbox_to_anchor=(1.0, 0.88), fontsize=18, labels=labels)
p = plt.gcf()
p.gca().add_artist(my_circle)
if textstr_title:
ax.text(0.34, 1.05, textstr_title, transform=ax.transAxes, weight='bold',
fontsize=30, verticalalignment='center_baseline')
sizes[0] *= PROTEIN_GRAMS_CALOIRES
sizes[1] *= CARBS_GRAMS_CALOIRES
sizes[2] *= FAT_GRAMS_CALOIRES
sum2 = round(sum(sizes), 2)
textstr_center1 = str(sum2)
textstr_center2 = 'קלוריות'[::-1]
ax.text(0.39, 0.56, textstr_center1, transform=ax.transAxes, weight='bold',
fontsize=24, verticalalignment='center_baseline')
ax.text(0.37, 0.44, textstr_center2, transform=ax.transAxes,
fontsize=18, verticalalignment='center_baseline')
if figname:
fig.patch.set_facecolor('white')
fig.savefig(figname, bbox_inches='tight', facecolor='white')
else:
plt.show()
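# --- Worked example: the calorie total shown in the donut centre ------------
# The centre label above is derived from the gram sizes using 4 kcal/g for
# protein and carbs and 9 kcal/g for fat. The macros below are invented
# purely for illustration.
def _example_donut_center_calories():
    protein_g, carbs_g, fat_g = 30.0, 50.0, 20.0       # hypothetical grams
    total_kcal = protein_g * 4 + carbs_g * 4 + fat_g * 9
    return round(total_kcal, 2)                         # 500.0 for this example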
# ------------------------------------------------------------------
def donut_generator_wrapper(title, data):
names = [x[::-1] for x in list(data.keys())]
sizes = list(data.values())
colors = ['darkorange', 'lightgreen', 'blue']
textstr_title = title[::-1]
figname = "donut_image1.png"
donut_generator(names=names,
sizes=sizes,
radius=0.7,
textstr_title=textstr_title,
colors=colors,
figname=figname)
return figname
# ------------------------------------------------------------------
def iniliatize_Diagram(title, data):
unique_filename = lambda fname: "%s_%s%s" % (path.splitext(fname)[0],
datetime.now().strftime("%m%d%Y_%H%M%S"),
path.splitext(fname)[1])
figname = donut_generator_wrapper(title, data)
res, figure_url = upload_file_to_s3(local_file=figname,
s3_folder="auto_generated",
s3_file=unique_filename(figname),
aws_access_key_id=DB_AWS_ACCESS_KEY_ID,
aws_secret_access_key=DB_AWS_SECRET_ACCESS_KEY,
aws_bucket=DB_AWS_BUCKET)
return figure_url
# ------------------------------------------------------------------
def activate_load_db(name, table, dic):
dic[name] = load_db(table)
def get_tables(bits):
table_dict = {'0x1': 'tzameret', '0x2': 'lut', '0x4': 'nutrients_qna',
'0x8': 'food_qna', '0x10': 'common_food',
'0x20': 'food_ranges', '0x40': 'micro_nutrients',
'0x80': 'food_units', '0x100': 'bloodtest_vals',
'0x200': 'food_units_aliases', '0x400': 'food_units_features',
'0x800': 'subs_tags_alias', '0x1000': 'Weights_and_measures'}
scale = 16
bits_binary = bin(int(bits, scale))[2:].zfill(len(bits) * 4)
numbers_zero = ''
numbers = []
for digit in reversed(bits_binary):
if digit != '1':
numbers_zero += digit
else:
numbers.append('1' + numbers_zero)
numbers_zero += '0'
for i, value in enumerate(numbers):
numbers[i] = hex(int(value, 2))
manager = multiprocessing.Manager()
db_dict = manager.dict()
jobs = []
for value in numbers:
# Pass the user at position i instead of the whole list
p = multiprocessing.Process(target=activate_load_db, args=(table_dict[value], value, db_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
return db_dict
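# --- Sketch of the bitmap decomposition done by get_tables ------------------
# get_tables('0x46'), for example, splits the bitmap into the set bits
# '0x2', '0x4' and '0x40' (lut, nutrients_qna, micro_nutrients) and loads one
# table per bit in its own process. The helper below reproduces only the pure
# bit arithmetic, with no database access.
def _example_decompose_bitmap(bits='0x46'):
    value = int(bits, 16)
    set_bits = []
    bit = 1
    while bit <= value:
        if value & bit:
            set_bits.append(hex(bit))   # e.g. 0x2 -> 'lut' in table_dict
        bit <<= 1
    return set_bits                      # ['0x2', '0x4', '0x40'] for '0x46'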
def load_db(db_bitmap, read_databse_en=True):
# available_tables_df=read_database
sql_params = {'connit_type': 'postgresql',
'connit_user': 'newtrds',
'connit_pass': '<PASSWORD>!',
'connit_host': 'newt-tzameret-db.c1ub7aqk5fah.eu-central-1.rds.amazonaws.com',
'connit_port': '5432',
'connit_db': 'postgres',
'max_records': 1000}
sql_engine = connect_to_server(sql_params)
if db_bitmap == '0x1':
tzameret = read_table(sql_engine, "SELECT * FROM tzameret_entity")
return tzameret
# "Zameret_hebrew_features" - entities aliases
if db_bitmap == '0x2':
lut = read_table(sql_engine, "SELECT * FROM rasa_lut_entity")
lut = lut.set_index('Entity Alias')
return lut
# "Zameret_hebrew_features" - nutrients_questions
if db_bitmap == '0x4':
nutrients_qna = read_table(sql_engine, "SELECT * FROM rasa_nutrients_qna_entity")
nutrients_qna = nutrients_qna.set_index('Entity')
return nutrients_qna
# "Zameret_hebrew_features" - Food questions
if db_bitmap == '0x8':
food_qna = read_table(sql_engine, "SELECT * FROM rasa_food_qna_entity")
food_qna = food_qna.set_index('nutrition_density')
return food_qna
# "Zameret_hebrew_features" - List of common foods
if db_bitmap == '0x10':
common_food = read_table(sql_engine, "SELECT * FROM common_food_entity")
common_food = common_food.set_index('common_name')
return common_food
# "Newt Machine Readable" - FoodItemRanges
if db_bitmap == '0x20':
food_ranges = read_table(sql_engine, "SELECT * FROM food_ranges_entity")
food_ranges = food_ranges.set_index('Nutrient')
return food_ranges
# "Newt Machine Readable" - MicroNutrients
if db_bitmap == '0x40':
micro_nutrients = read_table(sql_engine, "SELECT * FROM micronutrients_entity")
return micro_nutrients
# "Newt Machine Readable" - MicroNutrients
if db_bitmap == '0x80':
food_units = read_table(sql_engine, "SELECT * FROM food_units_entity")
return food_units
# "Newt Machine Readable" - BloodTestValues
if db_bitmap == '0x100':
bloodtest_vals = read_table(sql_engine, "SELECT * FROM bloodtest_vals_entity")
return bloodtest_vals
# "Zameret_hebrew_features" - Weight aliases
if db_bitmap == '0x200':
food_units_aliases = read_table(sql_engine, "SELECT * FROM food_units_aliases_entity")
return food_units_aliases
# "Zameret_hebrew_features" - For Noa
if db_bitmap == '0x400':
food_units_features_df = read_table(sql_engine, "SELECT * FROM tzameret_newt_entity")
food_units_features = food_units_features_df.dropna(axis=0, how='all')
food_units_features = food_units_features.rename({'Primary_SN': 'smlmitzrach'},
axis=1)
return food_units_features
# "Zameret_hebrew_features" - subs_tags_alias
if db_bitmap == '0x800':
subs_tags_alias = read_table(sql_engine, "SELECT * FROM subs_tags_aliases_entity")
subs_tags_alias = subs_tags_alias.set_index('Entity Alias').fillna(0)
return subs_tags_alias
if db_bitmap == '0x1000':
Weights_and_measures = read_table(sql_engine, "SELECT * FROM weights_measures")
return Weights_and_measures
def load_db_googleSheet(db_bitmap):
db_dict = {}
# "Zameret food list 22_JAN_2020"
if (db_bitmap & 0x1) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=84892416"
s = requests.get(url).content
db_dict['tzameret'] = pd.read_csv(io.StringIO(s.decode('utf-8'))).fillna(0)
# "Zameret_hebrew_features" - entities aliases
if (db_bitmap & 0x2) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1805881936"
s = requests.get(url).content
db_dict['lut'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity Alias"],
usecols=["Entity Alias", "Entity", "Units",
"Entity name", "RDA name",
"action_simple_question",
"action_nutrition_howmanyxiny_x",
"action_nutrition_howmanyxiny_y",
"action_nutrition_is_food_healthy",
"action_nutrition_is_food_recommended",
"action_nutrition_what_is_healthier_x",
"action_nutrition_what_is_healthier_y",
"action_nutrition_get_rda",
"action_nutrition_bloodtest_generic",
"action_nutrition_bloodtest_value",
"action_nutrition_food_substitute",
"action_nutrition_compare_foods",
"action_nutrition_howmanyxyinz"]).fillna(0)
# "Zameret_hebrew_features" - nutrients_questions
if (db_bitmap & 0x4) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1706335378"
s = requests.get(url).content
db_dict['nutrients_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity"]).fillna(0)
# "Zameret_hebrew_features" - Food questions
if (db_bitmap & 0x8) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1099284657"
s = requests.get(url).content
db_dict['food_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["nutrition_density"],
usecols=["nutrition_density", "energy_density",
"description_density"]).fillna(0)
# "Zameret_hebrew_features" - List of common foods
if (db_bitmap & 0x10) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=495295419"
s = requests.get(url).content
db_dict['common_food'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["common_name"],
usecols=["common_name", "shmmitzrach", "smlmitzrach"]).fillna(0)
# "Newt Machine Readable" - FoodItemRanges
if (db_bitmap & 0x20) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=885087351"
s = requests.get(url).content
db_dict['food_ranges'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Nutrient"],
usecols=["Nutrient", "Medium - threshold per 100gr",
"High - threshold per 100gr",
"good_or_bad", "tzameret_name", "hebrew_name"]).fillna(0)
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x40) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=222801095"
s = requests.get(url).content
micro_nutrients_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['micro_nutrients'] = micro_nutrients_df
# "Newt Machine Readable" - MicroNutrients
if (db_bitmap & 0x80) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1373096469"
s = requests.get(url).content
food_units_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0).fillna(0)
db_dict['food_units'] = food_units_df
# "Newt Machine Readable" - BloodTestValues
if (db_bitmap & 0x100) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=1011022304"
s = requests.get(url).content
bloodtest_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0, nrows=19, usecols=range(11)).fillna(0)
db_dict['bloodtest_vals'] = bloodtest_df
# "Zameret_hebrew_features" - Weight aliases
if (db_bitmap & 0x200) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=623521836"
s = requests.get(url).content
food_units_aliases_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=0)
db_dict['food_units_aliases'] = food_units_aliases_df
# "Zameret_hebrew_features" - For Noa
if (db_bitmap & 0x400) > 0:
url = "https://docs.google.com/spreadsheets/d/19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y/export?format=csv&gid=2106834268"
s = requests.get(url).content
food_units_features_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=1)
db_dict['food_units_features'] = food_units_features_df.dropna(axis=0, how='all')
db_dict['food_units_features'] = db_dict['food_units_features'].rename({'Primary_SN': 'smlmitzrach'},
axis=1)
# "Zameret_hebrew_features" - subs_tags_alias
if (db_bitmap & 0x800) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=458428667"
s = requests.get(url).content
db_dict['subs_tags_alias'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
usecols=["Entity Alias", "Entity", "Show_stopers"]).set_index(
'Entity Alias')
if (db_bitmap & 0x1000) > 0:
url = "https://docs.google.com/spreadsheets/d/19rYDpki0jgGeNlKLPnINiDG<KEY>EWSkLFo83Y/export?format=csv&gid=428717261"
s = requests.get(url).content
db_dict['Weights_and_measures'] = pd.read_csv(io.StringIO(s.decode('utf-8')), header=0)
return db_dict
# ------------------------------------------------------------------
def import_sheets(debug=False):
'''Import the df noa and tzameret food group tabs from the suggested meal planning sheet as a DataFrame. Import weights and measures, and tzameret food list from Tzameret DB as a DataFrame'''
# df = load_db_googleSheet(0x481)
df = get_tables('0x481')
sheet_id = '19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y'
gid_2 = '428717261'
df_tzameret_food_group = pd.read_csv(
f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv&gid={gid_2}")
df_nutrition = df['tzameret']
df_nutrition.fillna(0, inplace=True)
df_nutrition.rename(columns={'carbohydrates': 'carbs'}, inplace=True)
df_weights = df['food_units']
df_weights.head()
df_noa_pre_1 = df['food_units_features']
# df_tzameret_food_group = ['Weights_and_measures']
df_noa = df['food_units_features']
header = list(df_noa_pre_1.columns.values)
df_noa.loc[-1] = header # adding a row
df_noa.index = df_noa.index + 1 # shifting index
df_noa = df_noa.sort_index() # sorting by index
df_noa.head()
df_noa.columns = df_noa.columns.str.lower()
df_noa = df_noa.iloc[1:]  # df_noa does not have the first row with the numbers, to make it easier to filter data
df_noa['lactose_free'] = df_noa['lactose_free'].replace({'Low Lactose': 'Yes', 'Lactose Free': 'Yes'})
df_noa['food_category'] = df_noa['food_category'].replace({'N/A': 'Savoury_Snacks'})
df_noa.dropna(subset=["food_name"],
inplace=True) # dropping all meals that don't have a meal name to get complete list of actual meals
df_noa = df_noa.rename(columns={'smlmitzrach': 'primary_sn'})
df_noa['sn_1'] = df_noa['primary_sn'].astype(str).str[:1]
df_noa['sn_2'] = df_noa['primary_sn'].astype(str).str[1:2]
return df_noa, df_tzameret_food_group, df_weights, df_nutrition
# ------------------------------------------------------------------
def get_rda(name, tracker, nutrient_temp="", intent_upper=False):
actions_list = ['action_nutrition_howmanyxyinz', 'action_nutrition_compare_foods']
# db_dict = load_db_googleSheet(0x46)
db_dict = get_tables('0x46')
lut_df = db_dict['lut']
micro_nutrients_df = db_dict['micro_nutrients']
if intent_upper:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "Upper Limit"]
else:
micro_nutrients_df = micro_nutrients_df[micro_nutrients_df['Type'] == "RDA"]
status = "match"
if not (tracker.get_slot('gender') and tracker.get_slot('age') and tracker.get_slot(
'weight') and tracker.get_slot(
'height')):
status = "default"
nutrient = None
if name in actions_list:
nutrient = nutrient_temp
else:
x = tracker.get_slot('x') if tracker.get_slot('x') else None
if x is not None and x is not "":
nutrient = x
else:
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[name].values:
nutrient = ent['value']
break
if nutrient is None:
nutrient = nutrient_temp
try:
feature = lut_df['Entity'][nutrient]
feature_rda = lut_df['RDA name'][lut_df['Entity name'] == feature][0]
gender = "Male"
if tracker.get_slot('gender') == "זכר":
gender = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender = "Female"
user_vars = {}
user_vars['age'] = tracker.get_slot('age') if tracker.get_slot('age') else "40"
user_vars['weight'] = tracker.get_slot('weight') if tracker.get_slot('weight') else "80"
user_vars['height'] = tracker.get_slot('height') if tracker.get_slot('height') else "180"
rda_row = micro_nutrients_df[(micro_nutrients_df['Micronutrient'] == feature_rda) & \
((micro_nutrients_df['Gender'] == "ANY") | (
micro_nutrients_df['Gender'] == gender)) & \
((micro_nutrients_df['Pregnancy'] == "ANY") | (
micro_nutrients_df['Pregnancy'] == "No")) & \
((micro_nutrients_df['Lactating'] == "ANY") | (
micro_nutrients_df['Lactating'] == "No")) & \
((micro_nutrients_df['Age Min'] == "ANY") | (
micro_nutrients_df['Age Min'].astype(float) <= int(
user_vars['age']))) & \
((micro_nutrients_df['Age Max'] == "ANY") | (
micro_nutrients_df['Age Max'].astype(float) > int(user_vars['age'])))]
rda_text = str(rda_row['Free Text'].values[0])
rda_value = str(rda_row['Value'].values[0])
rda_units = rda_row['Units'].values[0]
rda_Image = rda_row['Image'].values[0]
if 'slot#' in rda_value:
rda_value_list = rda_value.split(' ')
for k, el in enumerate(rda_value_list):
if 'slot#' in el and el.split('#')[1] in user_vars:
rda_value_list[k] = user_vars[el.split('#')[1]]
strs = ' '.join([str(elem) for elem in rda_value_list])
# rda_value = eval(' '.join(rda_value_list))
rda_value = eval(str(strs))
rda_value = float(rda_value)
if 'slot#' in rda_text:
rda_text_list = rda_text.split(' ')
for k, el in enumerate(rda_text_list):
if 'slot#' in el:
rda_text_list[k] = tracker.get_slot(el.split('#')[1])
rda_text = ' '.join(rda_text_list)
rda_text_list = re.findall('\{.*?\}', rda_text)
for match in rda_text_list:
rda_text = rda_text.replace(match, str(eval(match[1:-1])))
if rda_text == "0":
rda_text = ""
return rda_value, rda_units, rda_text, status, nutrient, rda_Image
except:
return -1, -1, "", "missmatch", nutrient, ""
# ------------------------------------------------------------------
def get_personal_str(rda_status, tracker):
age = tracker.get_slot('age') if tracker.get_slot('age') and rda_status == "match" else '40'
gender = tracker.get_slot('gender') if tracker.get_slot('gender') and rda_status == "match" else 'זכר'
weight = tracker.get_slot('weight') if tracker.get_slot('weight') and rda_status == "match" else '80'
height = tracker.get_slot('height') if tracker.get_slot('height') and rda_status == "match" else '180'
if rda_status == "default":
personal_str = "עבור %s בגיל %s במשקל %s ובגובה %s" % (gender, age, weight, height)
else:
personal_str = "עבורך (%s בגיל %s במשקל %s ובגובה %s)" % (gender, age, weight, height)
return personal_str
# ------------------------------------------------------------------
def get_food_nutrition_density(food, food_ranges_db):
# Nutrition Density is defined in Tzameret:
density_normalized = float(food["Nutrition density normalized"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Nutrition density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
# ------------------------------------------------------------------
def get_food_energy_density(food, food_ranges_db):
# Energy Density is defined in Tzameret:
density_normalized = float(food["Energy density"])
# Thresholds are defined in Machine-Readable:
density = food_ranges_db[food_ranges_db.index == "Energy density"]
density_med = float(density["Medium - threshold per 100gr"])
density_high = float(density["High - threshold per 100gr"])
# Binning:
res = "high"
if density_normalized < density_med:
res = "low"
elif density_normalized < density_high:
res = "med"
return density, res
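# --- Minimal sketch of the low/med/high binning used by the two helpers -----
# Both density functions compare a per-100g value against the "Medium" and
# "High" thresholds taken from the food_ranges table. The threshold numbers
# below are invented for illustration only.
def _example_density_bin(value=0.45, med_thr=0.3, high_thr=0.6):
    if value < med_thr:
        return "low"
    if value < high_thr:
        return "med"                     # 0.45 falls here with these thresholds
    return "high"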
# ------------------------------------------------------------------
def getcolums_FromDataFrame(db, colum):
res = ''
index_temp = 1
for index, row in db.iterrows():
res = res + str(index_temp) + '. ' + row[colum] + '\n'
index_temp += 1
return res
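# --- Usage sketch for getcolums_FromDataFrame (toy data, not real rows) -----
# A tiny example of the numbered-list formatting above; the food names are
# invented and the column name mirrors the Tzameret 'shmmitzrach' field.
def _example_numbered_column():
    import pandas as pd
    toy = pd.DataFrame({'shmmitzrach': ['לחם מלא', 'גבינה לבנה', 'תפוח']})
    # Returns "1. לחם מלא\n2. גבינה לבנה\n3. תפוח\n"
    return getcolums_FromDataFrame(toy, 'shmmitzrach')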
def get_entity_filters(entity_fromrasa, negative_words_list, lut):
entities_temp = {}
entity = re.sub('וו|-|מנה|ארוחה|גם|הוא|שהוא|שהיא|[,?/123456789]|והיא|והוא', '', entity_fromrasa)
entity = entity.replace(' ', ' ')
entities = entity.split(' ')
for index, ent in enumerate(entities):
filter_type_word = False
ent = ent.strip()
if ent and ent[0] == 'ו':
ent = ent[1:]
entities_temp[ent] = fliter_type(ent, lut, filter_type_word)
if ent == '' or ent == ' ' or ent in negative_words_list or len(ent) <= 2:
continue
if index == 0 and ent in negative_words_list:
continue
if index == 0:
entities_temp[ent] = fliter_type(ent, lut, filter_type_word)
else:
if entities[index - 1] in negative_words_list:
filter_type_word = True
entities_temp[ent] = fliter_type(ent, lut, filter_type_word)
return entities_temp
def fliter_type(entity, lut, type_filter):
# get the entity alias for the Zameret
if 'פלאו' in entity or 'פליאו' in entity:
entity = "Paleo"
elif 'טבעוני' in entity or 'טבעונית' in entity or 'טבעוניים' in entity or 'טבעונים' in entity:
entity = "Vegan"
elif 'צמחוני' in entity or 'צמחוניים' in entity or 'צמחונית' in entity or 'צמחונים' in entity:
entity = "Vegetarian"
else:
entity = lut[lut.index == entity]['Entity'].iloc[0]
if entity == 'Dairy' and type_filter is False:
return entity, 'No'
if entity == 'Fish_Free' and type_filter is False:
return entity, 'No'
elif entity == 'Fish_Free':
return entity, 'Yes'
return entity, 'Yes'
def how_many_x_in_y_core(x, y, food_units, name, tracker):
# db_dict = load_db_googleSheet(0x293)
db_dict = get_tables('0x293')
y_common = y
if y in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y]['shmmitzrach'][0]
else:
y_food = ' '.join(y.split(' ')[1:])
food_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == y.split(' ')[0]][
'Zameret unit']
if food_units.empty:
food_units = y.split(' ')[0]
else:
food_units = food_units.values[0]
if y_food in db_dict['common_food'].index:
y_common = db_dict['common_food'][db_dict['common_food'].index == y_food]['shmmitzrach'][0]
else:
y_common = y_food
food = db_dict['tzameret'][db_dict['tzameret']['shmmitzrach'].str.contains(y_common)].iloc[0, :]
feature = db_dict['lut'][db_dict['lut'].index == x]["Entity"][0]
units = db_dict['lut'][db_dict['lut'].index == x]["Units"][0]
food_units_row = pd.Series()
if food_units:
food_units_row = db_dict['food_units'][(db_dict['food_units']['smlmitzrach'] == food['smlmitzrach']) &
(db_dict['food_units']['shmmida'] == food_units)]
is_food_units_match = not food_units_row.empty or food_units == "100 גרם"
food_units_factor = 1.0
if not food_units_row.empty:
food_units_factor = food_units_row['mishkal'].values[0] / 100
val = food[feature] * food_units_factor
if units == 0:
res = "ב-%s של %s יש %.2f %s" % (food_units, food['shmmitzrach'], float(val), x)
else:
res = ""
if not is_food_units_match:
res = "לא הצלחתי למצוא נתונים במאגר על היחידה %s עליה שאלת\n" % food_units
res += "היחידות הבאות קיימות במאגר, עבור %s:\n" % food['shmmitzrach']
res += ', '.join(db_dict['food_units'][db_dict['food_units']['smlmitzrach'] == food['smlmitzrach']][
'shmmida'].to_list())
res += "\n"
food_units = "100 גרם"
res += "ב-%s של %s יש %.2f %s %s" % (food_units, food['shmmitzrach'], float(val), units, x)
rda_val, rda_units, rda_text, rda_status, nutrient, x_1 = get_rda(name, tracker, x)
if rda_val > 0 and units not in ['יחב"ל']:
rda = 100 * float(val) / rda_val
res += "\n"
res += "שהם כ-%d אחוז מהקצובה היומית המומלצת %s" % (int(rda), get_personal_str(rda_status, tracker))
if rda_text and rda_text != '0':
res += '\n' + rda_text
return val, res
def reverse_number(n):
rev = 0
while (n > 0):
a = n % 10
rev = rev * 10 + a
n = n // 10
return rev
# ------------------------------------------------------------------
# ____ _ _ _ __ __ _
# | __ ) _ _(_) | __| | | \/ | ___ __ _| |
# | _ \| | | | | |/ _` | | |\/| |/ _ \/ _` | |
# | |_) | |_| | | | (_| | | | | | __/ (_| | |
# |____/ \__,_|_|_|\__,_| |_| |_|\___|\__,_|_|
def get_coupling(meals_bank, coupling, caloires, can_grams, amount, df, item_type):
# a coupling value of type float is always NaN (an empty cell), so there is nothing to couple
if isinstance(coupling, float):
return 0, False, 0, 0, 0, 0, 0, ''
if 'NaN' in coupling or 'nan' in coupling or coupling == '':
return 0, False, 0, 0, 0, 0, 0, ''
coupling = str(coupling)
couling_list = coupling.split(',')
if couling_list == []:
return 0, False, 0, 0, 0, 0, 0, ''
# get random number for the coupling
end = len(couling_list) - 1
if '\n' in couling_list[len(couling_list) - 1] or '\r' in couling_list[len(couling_list) - 1]:
end = len(couling_list) - 2
if len(couling_list) == 1:
coupling_foods = meals_bank[meals_bank['#'] == int(couling_list[0])]
else:
couling_number = random.randint(0, end)
coupling_foods = meals_bank[meals_bank['#'] == int(couling_list[couling_number])]
serial_sn = coupling_foods['primary_sn'].values
food_name = coupling_foods['food_name'].values
if len(serial_sn) <= 0 or len(food_name) <= 0:
return 0, False, 0, 0, 0, 0, 0, ''
coupling_foods_serial = int(serial_sn[0])
# get the mida of the coupling food
candidate_units = candidate_units_amounts(df, coupling_foods_serial, item_type)
candidate_grams = candidate_units[0]
for can_grams in candidate_grams:
calories_couple, weight, grams, x, y = get_item_property(coupling_foods_serial, can_grams, amount)
if caloires < 0:
caloires = caloires * -1
if calories_couple <= caloires:
return calories_couple, True, coupling_foods_serial, weight, grams, x, y, food_name[0]
return 0, False, 0, 0, 0, 0, 0, ''
def checkDoublePattern(sentence, pattern):
temp = sentence.count(pattern)
if temp == 2:
return sentence[:sentence.find(pattern) + len(pattern)]
return sentence
def update_budgets(daily_budget, meals_num, snacks_num, weights):
'''Takes total budget, number of meals and snacks, and weights as parameters. Returns a budget for each category for every meal'''
# TODO: expose 0.3 (budget_var) as a user parameter
budgets = {}
div = (meals_num + inputs.get(
'budget_var') * snacks_num)  # budget_var (0.3 by default) weights each snack as a fraction of a full meal
if div > 0:
budgets['meal'] = round(daily_budget / div, 1)
budgets['snack'] = round(inputs.get('budget_var') * daily_budget / div, 1)
budgets['Carbs'] = round(weights[0] * budgets['meal'], 1)
budgets['Protein'] = round(weights[1] * budgets['meal'], 1)
budgets['Vegetables'] = round(weights[2] * budgets['meal'], 1)
budgets['Fruits'] = round(weights[3] * budgets['snack'], 1)
budgets['Fat'] = round(weights[4] * budgets['snack'], 1)
budgets['Fat_meal'] = round(weights[4] * budgets['meal'], 1)
budgets['Savoury_Snacks'] = round(weights[5] * budgets['snack'], 1)
budgets['Sweets'] = round(weights[6] * budgets['snack'], 1)
budgets['all'] = round(daily_budget, 1)
return budgets
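# --- Worked example for update_budgets (sketch, default budget_var of 0.3) --
# For a hypothetical 2000 kcal day with 3 meals and 2 snacks:
#   div          = 3 + 0.3 * 2       = 3.6
#   meal budget  = 2000 / 3.6        ~ 555.6 kcal
#   snack budget = 0.3 * 2000 / 3.6  ~ 166.7 kcal
# Category budgets are then the meal/snack budget times the category weight
# (e.g. Carbs at weight 0.4 gives ~222.2 kcal per meal).
def _example_meal_snack_budgets(daily=2000.0, meals=3, snacks=2, budget_var=0.3):
    div = meals + budget_var * snacks
    meal_budget = round(daily / div, 1)
    snack_budget = round(budget_var * daily / div, 1)
    return meal_budget, snack_budget     # (555.6, 166.7)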
def filter_meals_by_features(user_params, df_feature):
'''Takes user inputs and a Dataframe as parameters and returns a DataFrame filtered by the user inputs'''
for k, v in user_params.items():
if (v == 'Yes') and (debug['debug_en']):
df_feature = df_feature.loc[df_feature[k] == v]
return df_feature
def filter_meals_by_meal_type(df, meal_type):
'''Filters the DataFrame by the meal type to be used in making a scoreboard for each meal like breakfast, lunch etc.'''
if debug:
return df.loc[(df['il_' + meal_type] == 'Yes')]
return df
def candidate_units_amounts(item, sn, items_type):
'''Returns the different options for mida amount and servings for each amount'''
sn_1 = int(item['sn_1'].values[0])
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
units_intersection = []
amounts_intersection = []
if items_type != 'snack':
df_max_meal = df_tzameret_food_group.loc[df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_1]
max_amount_meal = df_max_meal['mida_maxAmount_meal'].values[0].replace(' ', '').split(',')
min_amount_meal = df_max_meal['mida_minAmount_meal'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
min_max_amount_meal_units = [int(value.split('_')[0]) for value in max_amount_meal]
min_max_amount_meal_amounts = [list(range(int(min_val.split('_')[1]), int(max_val.split('_')[1]) + 1)) for
min_val, max_val in zip(min_amount_meal, max_amount_meal)]
for k, value in enumerate(min_max_amount_meal_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(min_max_amount_meal_amounts[k])
else:
max_amount_snack = df_max_meal['mida_maxAmount_snack'].values[0].replace(' ', '').split(',')
df_weights_list = df_weights[df_weights['smlmitzrach'] == sn]
weights_list = df_weights_list['mida'].tolist()
max_amount_snack_units = [int(value.split('_')[0]) for value in max_amount_snack]
max_amount_snack_amounts = [list(range(1, int(value.split('_')[1]) + 1)) for value in max_amount_snack]
for k, value in enumerate(max_amount_snack_units):
if value in weights_list:
units_intersection.append(value)
amounts_intersection.append(max_amount_snack_amounts[k])
return units_intersection, amounts_intersection
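# --- Sketch of the '<unit>_<amount>' parsing done in candidate_units_amounts -
# The mida_min/maxAmount cells hold comma-separated pairs such as '1_3,5_2'
# (Tzameret unit code, then amount). The strings below are invented to show
# the parsing only; real values come from df_tzameret_food_group.
def _example_parse_unit_amounts(max_cell='1_3,5_2', min_cell='1_1,5_1'):
    max_pairs = max_cell.replace(' ', '').split(',')
    min_pairs = min_cell.replace(' ', '').split(',')
    units = [int(p.split('_')[0]) for p in max_pairs]
    amounts = [list(range(int(lo.split('_')[1]), int(hi.split('_')[1]) + 1))
               for lo, hi in zip(min_pairs, max_pairs)]
    return units, amounts                # ([1, 5], [[1, 2, 3], [1, 2]])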
def get_item_property(sn, grams, serving):
'''Returns the total calories for a given item (serial number), unit (mida) and number of servings'''
# if the unit weight (mishkal) is 700, multiply by 100; for any other value divide by 100 to get a per-100-gram factor
weights = df_weights[(df_weights['smlmitzrach'] == sn) & (df_weights['mida'] == grams)]
mishkal = weights.iloc[0]['mishkal']
if mishkal == 700:
mishkal = mishkal * 100
else:
mishkal = mishkal / 100
attribute = df_nutrition.loc[df_nutrition['smlmitzrach'] == str(int(sn))]
attribute_total = attribute.iloc[0]['food_energy']
total = attribute_total * mishkal * serving
return total, weights.iloc[0]['shmmida'], weights.iloc[0]['mishkal'], weights, serving
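# --- Worked example for get_item_property (invented numbers) ----------------
# An item's calories are energy-per-100g times (unit weight / 100) times the
# number of servings; the mishkal == 700 special case above is skipped here.
def _example_item_calories(energy_per_100g=250.0, unit_weight_g=30.0, servings=2):
    factor = unit_weight_g / 100.0
    return energy_per_100g * factor * servings   # 150.0 kcal for these values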
def update_calorie_budgets(candidate_calories, item_type, bud):
'''Updates the calories budget based on how many calories were already used'''
bud[item_type] = bud[item_type] - candidate_calories
return bud
def check_item_type_if_exist_already(meals, df_noa):
if meals == [] or len(meals) == 1:
return True
for item in meals:
sn = item['primary_sn'].values[0]
item_temp = df_noa[df_noa['primary_sn'] == str(sn)]
if item_temp['beef_chicken_fish'].iloc[0] == 'No':
return False
return True
def build_meal(meals_bank, meal_type, budget):
# Make a histogram without the penalty score by running the simulator 50 times and picking the winners; run it again with the penalty score
'''Builds a meal taking a DataFrame, meal type and budget as parameters. The meal takes an item from each category (Carbs, Protein etc.) and returns the meal, weighted average score and total meal calories'''
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
bud = {}
meal_similarity_list = []
df_health = df_nutrition.iloc[1:]
max_meal_items = inputs.get('max_items_snack') if meal_type == 'snack' else inputs.get('max_items_meal')
nutrition_density_list = []
energy_density_list = []
meal_score = 0
score_list = []
uti_score = []
ind_score = []
score = 0
meals = []
meal_cals = 0
types = []
total_budget = budget.copy()
item_types = {'breakfast': ['Carbs', 'Protein', 'Vegetables'],
'lunch': ['Carbs', 'Protein', 'Vegetables'],
'dinner': ['Carbs', 'Protein', 'Vegetables'],
'snack': ['Fat']}
if (snacks.get('sweets') == 'Yes') & (len(meals_bank.loc[meals_bank['food_category'] == 'Sweets']) > 0):
item_types['snack'].append('Sweets')
if (snacks.get('Savoury_Snacks') == 'Yes') & (
len(meals_bank.loc[meals_bank['food_category'] == 'Savoury_Snacks']) > 0):
item_types['snack'].append('Savoury_Snacks')
if (user_params.get('fruits') == 'No') & (len(meals_bank.loc[meals_bank['food_category'] == 'Fruits']) > 0):
item_types['snack'].append('Fruits')
for k in range(max_meal_items):
for item_type in item_types[meal_type]:
success = False
if (len(meals_bank.loc[meals_bank['food_category'] == item_type]) > 0):
df = meals_bank.loc[meals_bank['food_category'] == item_type].sample()
# get the item primary sn
sn = int(df['primary_sn'].values[0])
df['primary_sn'] = sn
candidate_units = candidate_units_amounts(df, int(df['primary_sn'].values[0]), item_type)
candidate_grams = candidate_units[0]
for can_grams in candidate_grams:
sn = float(df['primary_sn'].values[0])
for candidate_amount in candidate_units[1]:
for amount in reversed(candidate_amount):
calories, weight, grams, x, y = get_item_property(int(sn), can_grams, amount)
can_cals = getattr(calories, "tolist", lambda: candidate_calories)()
coupling_numbers = df['coupling'].values[0]
coupling_cals, coupling_boolean, coupling_food_primary_sn, weight_coupling, grams_coupling, x, y, food_name_coupling = get_coupling(
meals_bank, coupling_numbers, budget[item_type] - can_cals, can_grams, amount, df,
item_type)
can_cals += coupling_cals
if can_cals < budget[item_type]:
success = True
if success:
if success:
# check if item of meal type is exist in the meal already
if check_item_type_if_exist_already(meals, df_noa):
sn_int = int(df['primary_sn'].astype(str).str[:1].iloc[0])
sn1 = int(df['primary_sn'].values[0])
calories1, weight, grams, x, y = get_item_property(sn1, can_grams, amount)
bud[item_type] = getattr(calories1, "tolist", lambda: candidate_calories)()
units_priority = candidate_grams.index(can_grams) + 1
meal_score += 1 / units_priority
df_sn1 = df_tzameret_food_group.loc[
df_tzameret_food_group['ספרה ראשונה בקוד'] == sn_int]
df_fish = df_noa.loc[df_noa['primary_sn'] == str(sn1)]
food_group = df_sn1['קבוצת המזון']
if sn_int == 2:
if df_fish['fish_free'].iloc[0] == 'Yes':
meal_similarity_list.append(2.1)
else:
meal_similarity_list.append(2.2)
else:
meal_similarity_list.append(sn_int)
item_score = (bud[item_type]) / (budget[item_type])
df['score'] = item_score
score_list.append(item_score)
types.append(df['food_category'])
nutrition_density_normalized = df_nutrition.loc[
df_nutrition['smlmitzrach'] == str(
int(sn1)), 'Nutrition density normalized']
energy_density = df_health.loc[
df_health['smlmitzrach'] == str(int(sn1)), 'Energy density']
nutrition_density_normalized = nutrition_density_normalized.astype(float)
energy_density = energy_density.astype(float)
if coupling_boolean:
name = df['food_name'].values[0]
name += ' עם ' + food_name_coupling
df['food_name'] = name
dataframe = df[['food_name', 'primary_sn']]
dataframe.insert(2, 'Weight', [grams])
dataframe.insert(3, 'Unit', [weight])
dataframe.insert(4, 'Amount', [amount])
meals.append(dataframe)
nutrition_density_list.append(nutrition_density_normalized.values.tolist())
energy_density_list.append(energy_density.values.tolist())
meal_cals = meal_cals + calories1
budget = update_calorie_budgets(can_cals, item_type, budget)
else:
continue
break
if success or budget[item_type] < units_thr[item_type] or len(meals) >= max_meal_items:
break
if success or budget[item_type] < type_thr[item_type] or len(meals) >= max_meal_items:
break
if budget['all'] < inputs['item_thr'] or len(meals) >= max_meal_items:
break
if len(meals) >= max_meal_items:
break
types_list_no_duplicates = np.unique([x.values[0] for x in types]).tolist()
for each_type in reversed(types_list_no_duplicates):
each_score = (float(total_budget.get(each_type)) - float(budget.get(each_type))) / float(
total_budget.get(each_type))
ind_score.append(each_score)
uti_score.append(budget_weights.get(each_type))
if (len(ind_score) < len(item_types[meal_type])):
ind_score.append(0.000001)
uti_score.append(.35)
if (min(ind_score) < 0.7) and (meal_type != 'snack'):
extra_penalty = inputs.get('extra_penalty')
else:
extra_penalty = 0
if (len(meals)) > 4:
meal_penalty_length = (len(meals) - 4) * inputs.get('meal_penalty_length')
else:
meal_penalty_length = 0
total_utilization = sum(x * y for x, y in zip(ind_score, uti_score)) / sum(uti_score)
if len(meal_similarity_list) != len(set(meal_similarity_list)):
meal_similarity_penalty = inputs.get('meal_similarity_penalty')
else:
meal_similarity_penalty = 0
nutrition_density_list = [float(x) for [x] in nutrition_density_list]
try:
avg_nutrition = round(mean(nutrition_density_list), 4)
except:
avg_nutrition = nutrition_density_list
energy_density_list = [float(x) for [x] in energy_density_list]
avg_energy = round(mean(energy_density_list), 4)
penalty_score = 1 - meal_score / len(meals)
nutrition_boost = avg_nutrition * inputs.get('nutrition_bonus')
energy_boost = avg_energy * inputs.get('energy_bonus')
if scoring.get('legacy'):
score = total_utilization - (
penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length
elif scoring.get('legacy_nut'):
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + nutrition_boost
elif scoring.get('legacy_ene'):
total_utilization - (
penalty_score * inputs.get('penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost
else:
score = total_utilization - (penalty_score * inputs.get(
'penalty_weight')) - extra_penalty - meal_penalty_length + energy_boost + nutrition_boost
return meals, score, meal_cals, ind_score, meal_penalty_length, avg_nutrition, avg_energy, meal_similarity_penalty, meal_similarity_list
def build_meal_wrapper():
energy_density_listy = 0.0
meal_similarity_listy = []
nutrition_density_listy = []
meal_similarity_penaltyy = []
nutrition_density_listx = []
energy_density_listx = 0.0
meal_similarity_penaltyx = []
meal_similarity_listx = []
penalty_lengthy = []
# Builds and populates a scoreboard that sorts the meals based on their score
x = -3
pd.set_option("display.precision", 2)
max_iterations = inputs.get('max_iter')
budget_weights = {**budget_weights_meals, **budget_weights_snacks_fruits_fat, **budget_weights_savoury_snacks,
**budget_weights_sweets}
budget_weights_list = []
for k, v in budget_weights.items():
budget_weights_list.append(v)
score_tracker = -2
total_cals = 0
meals = {}
user_meals_num = inputs.get('meals_num')
user_snacks_num = inputs.get('snacks_num')
filler = []
meal_types = ['breakfast', 'lunch', 'dinner']
for k in range(inputs.get('snacks_num')):
meal_types.append('snack')
features = filter_meals_by_features(user_params, df_noa)
for meal_type in meal_types:
bank = filter_meals_by_meal_type(features, meal_type)
x += 1
scoreboard = {}
for k in range(inputs.get('max_iter')):
budgets_dynamic = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'), budget_weights_list)
meal_budget = update_budgets(inputs.get('total_cals'), inputs.get('meals_num'),
inputs.get('snacks_num'),
budget_weights_list)
if meal_type != 'snack':
mealy, scorey, calsy, ut_scorey, penalty_lengthy, nutrition_density_listy, energy_density_listy, meal_similarity_penaltyy, meal_similarity_listy = build_meal(
bank, meal_type, budgets_dynamic)
if mealy and scorey and min(ut_scorey) > 0:
scoreboard[meal_type] = mealy, scorey, calsy
if scoreboard[meal_type][1] > score_tracker:
score_tracker = scoreboard[meal_type][1]
total_cals = scoreboard[meal_type][2]
else:
mealx, scorex, calsx, ut_scorex, penalty_lengthx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx = build_meal(
bank, meal_type, meal_budget)
if mealx:
scoreboard[
meal_type] = mealx, scorex, calsx, nutrition_density_listx, energy_density_listx, meal_similarity_penaltyx, meal_similarity_listx
if scoreboard:
meals[meal_type] = scoreboard[meal_type]
for meal_name, whole_meal in scoreboard.items():
df = pd.concat(whole_meal[0])
df = pd.DataFrame(df.values.reshape(1, -1))
df['score'] = float(scoreboard[meal_type][1])
df['meal_cals'] = scoreboard[meal_type][2]
if meal_name != 'snack':
df['name'] = meal_name
df['budget per meal'] = meal_budget.get('meal')
df['meal budget utilization'] = (df['meal_cals'] / df['budget per meal'])
df['average nutrtition'] = nutrition_density_listy
df['average energy'] = energy_density_listy
df['meal_similarity_penalty'] = meal_similarity_penaltyy
df['meal_similarity_list'] = pd.Series([meal_similarity_listy])
df.set_index('name', drop=True, inplace=True)
else:
df['name'] = meal_name + " " + str(x)
df['budget per snack'] = budgets_dynamic.get('snack')
df['snack budget utilization'] = (df['meal_cals'] / df['budget per snack'])
df['average nutrtition'] = nutrition_density_listx
df['average energy'] = energy_density_listx
df['meal_similarity_penalty'] = meal_similarity_penaltyx
df['meal_similarity_list'] = pd.Series([meal_similarity_listx])
df.set_index('name', drop=True, inplace=True)
if meal_name != 'snack':
# rename all the budget as budget leftover so its carbs budget leftover etc.
df['meal penalty length'] = penalty_lengthy
df['carb budget per meal'] = int(meal_budget.get('Carbs'))
df['carbs budget remaining'] = budgets_dynamic.get('Carbs')
df['carb budget utilization'] = (meal_budget.get('Carbs') - budgets_dynamic.get(
'Carbs')) / meal_budget.get('Carbs')
df['protein budget per meal'] = meal_budget.get('Protein')
df['protein budget remaining'] = budgets_dynamic.get('Protein')
df['protein budget utilization'] = (meal_budget.get('Protein') - budgets_dynamic.get(
'Protein')) / meal_budget.get('Protein')
df['vegetable budget per meal'] = meal_budget.get('Vegetables')
df['vegetable budget remaining'] = budgets_dynamic.get('Vegetables')
df['vegetable budget utilization'] = (meal_budget.get('Vegetables') - budgets_dynamic.get(
'Vegetables')) / meal_budget.get('Vegetables')
df['fat budget per meal'] = meal_budget.get('Fat_meal')
df['fat budget remaining'] = budgets_dynamic.get('Fat_meal')
df['fat budget utilization'] = (meal_budget.get('Fat_meal') - budgets_dynamic.get(
'Fat_meal')) / meal_budget.get('Fat_meal')
else:
if snacks.get('sweets') == "Yes":
df['sweets budget per snack'] = meal_budget.get('Sweets')
df['sweets budget remaining'] = budgets_dynamic.get('Sweets')
df['sweets budget utilization'] = (meal_budget.get('Sweets') - budgets_dynamic.get(
'Sweets')) / meal_budget.get('Sweets')
if snacks.get('Savoury_Snacks') == 'Yes':
df['savoury budget per snack'] = meal_budget.get('Savoury_Snacks')
df['savoury budget remaining'] = budgets_dynamic.get('Savoury_Snacks')
df['savoury budget utilization'] = (meal_budget.get('Savoury_Snacks') - budgets_dynamic.get(
'Savoury_Snacks')) / meal_budget.get('Savoury_Snacks')
if user_params.get('fruits') == 'No':
df['fruits budget per snack'] = meal_budget.get('Fruits')
df['fruits budget remaining'] = budgets_dynamic.get('Fruits')
df['fruits budget utilization'] = (meal_budget.get('Fruits') - budgets_dynamic.get(
'Fruits')) / meal_budget.get('Fruits')
df['fat budget per snack'] = meal_budget.get('Fat')
df['fat budget remaining'] = budgets_dynamic.get('Fat')
df['fat budget utilization'] = (meal_budget.get('Fat') - budgets_dynamic.get(
'Fat')) / meal_budget.get('Fat')
filler.append(df)
if meal_type == 'snack':
user_snacks_num -= 1
else:
user_meals_num -= 1
budgets_dynamic = update_budgets(float(inputs.get('total_cals') - total_cals), user_meals_num, user_snacks_num,
budget_weights_list)
df_meals = pd.concat(filler)
df_final = df_meals.sort_values(by=['name', 'score'], ascending=[True, False])
df_final.rename(columns={0: "Item 1", 1: "Primary SN 1", 2: "Weight", 3: "Unit1", 4: "Amount1",
5: "Item 2", 6: "Primary SN 2", 7: "Weight", 8: "Unit2", 9: "Amount2",
10: "Item 3", 11: "Primary SN 3", 12: "Weight", 13: "Unit3", 14: "Amount3",
15: "Item 4", 16: "Primary SN 4", 17: "Weight", 18: "Unit4", 19: "Amount4"}
, inplace=True)
return df_final
def displayMeal(data, mealType, items_meal_number, sncack_numbers, df_nutrition):
menu = ""
calories = 0
# hole day menu
carbs = 0
protein = 0
fat = 0
if len(mealType) > 1:
for meal in mealType:
items, temp_calories, temp_carbs, temp_protein, temp_fat = getMeal(data, meal, items_meal_number,
df_nutrition)
calories += temp_calories
menu = menu + items
carbs = carbs + temp_carbs
protein = protein + temp_protein
fat = fat + temp_fat
# one meal for the user
else:
menu, calories, carbs, protein, fat = getMeal(data, mealType[0], items_meal_number, df_nutrition)
return menu, carbs, protein, fat
snacks, calories_snack, carbs_temp, temp_protein, temp_fat = getSnack(data, sncack_numbers, df_nutrition)
carbs = carbs + carbs_temp
protein = protein + temp_protein
fat = fat + temp_fat
menu = menu + snacks
calories += calories_snack
menu = menu + "\n*סך הכל קלוריות -> " + str(round(calories, 2)) + '*'
return menu, carbs, protein, fat
def getMeal(data, meal_type, meal_items_nubmer, df_nutrition):
# item[0]-> food name
# item[1]-> serail number
# item[2]-> unit
# item[3]-> amount
# item[4]-> Weight
dic = {'breakfast': 'ארוחת בוקר', 'lunch': 'ארוחת צהריים', 'dinner': 'ארוחת ערב'}
global temp_meal
temp_meal = data[data.index == meal_type]
index_number = 1
for index, row in temp_meal.iterrows():
if isinstance(row['Item 3'], str) and isinstance(row['Item 2'], str) and isinstance(
row['Item 1'], str):
temp_meal = temp_meal.head(index_number).tail(1)
break
index_number += 1
if len(temp_meal.index) > 1:
temp_meal = temp_meal.head(1)
items, items_number = get_items(temp_meal, meal_items_nubmer)
protein, fat, carbs, calories = get_calories(df_nutrition, items)
if items_number == 4:
return "*" + dic[meal_type] + "*:\n1. " + buildItem(items['item1']) + "\n2. " + buildItem(
items["item2"]) + "\n3. " + buildItem(
items['item3']) + "\n4. " + buildItem(
items['item4']) + "\nכמות קלוריות ->" + str(calories) + "\n\n", calories, carbs, protein, fat
return "*" + dic[meal_type] + "*:\n1. " + buildItem(items['item1']) + "\n2. " + buildItem(
items["item2"]) + "\n3. " + buildItem(
items['item3']) + "\nכמות קלוריות ->" + str(calories) + "\n\n", calories, carbs, protein, fat
def get_items(temp_meal, items_number):
meal = {}
items_number_temp = None
for index in range(1, items_number + 1):
item = temp_meal['Item ' + str(index)].iloc[0]
if isinstance(item, str):
items_number_temp = index
meal['item' + str(index)] = [temp_meal['Item ' + str(index)].iloc[0],
temp_meal['Primary SN ' + str(index)].iloc[0],
temp_meal['Unit' + str(index)].iloc[0],
int(temp_meal['Amount' + str(index)].iloc[0]),
float(temp_meal['Weight'].values[0][index - 1])]
else:
break
return meal, items_number_temp
def getSnack(snackData, snack_number, df_nutrition):
# get the line of each snack
snack1 = snackData[snackData.index == "snack 1"]
snack2 = snackData[snackData.index == "snack 2"]
# get the items
snack1_, x = get_items(snack1, snack_number)
snack2_, y = get_items(snack2, snack_number)
protein1, fat1, carb1, snack1_calories = get_calories(df_nutrition, snack1_)
protein2, fat2, carb2, snack2_calories = get_calories(df_nutrition, snack2_)
carb1 = int(carb1)
protein1 = int(protein1)
fat1 = int(fat1)
snack1_calories = int(snack1_calories)
carb2 = int(carb2)
protein2 = int(protein2)
fat2 = int(fat2)
snack2_calories = int(snack2_calories)
if snack_number == 2:
return "*ארוחת ביניים 1*:\n1. " + buildItem(snack1_['item1']) + "\n2. " + buildItem(
snack1_['item2']) + "\nכמות קלוריות -> " + str(snack1_calories) + "\n\n*ארוחת ביניים 2*:\n1." + buildItem(
snack2_['item1']) + "\n2. " + buildItem(
snack2_['item2']) + "\nכמות קלוריות -> " + str(
snack2_calories) + "\n\n", snack1_calories + snack2_calories, carb1 + carb2, protein1 + protein2, fat1 + fat2
return "*ארוחת ביניים *:\n1. " + buildItem(snack1_['item1']) + "\n2. " + buildItem(
snack2_['item1']) + "\nכמות קלוריות -> " + str(
snack1_calories) + "\n", snack1_calories, carb1, protein1, fat1
def buildItem(item):
if item[0] != 'NaN' and item[2] != 'Nan' and item[0] != 'nan' and item[2] != 'nan':
item_temp = item[0]
amount = str(item[4])
if ' ' in item[2]:
unit_temp = item[2].split(' ')[0]
else:
unit_temp = item[2]
if '.0' in amount:
amount = amount[:amount.find('.')]
return str(item[0]) + " " + str(item[3]) + " " + unitHebrew(item[2], item[
3]) + ' (' + unit_temp + ' אחת -> ' + amount + ' גרם)'
def unitHebrew(unit, amount):
unit_dic = {"כף": 'כפות', "מנה": 'מנות', "יחידה קטנה": 'יחידות קטנות', "פרח": 'פרחים',
"פרוסה בינונית": 'פרוסות בינונוית',
"יחידה": 'יחידות', "כף גדושה": 'כפות גדושות',
"פרוסה": 'פרוסות', "מנה קטנה": 'מנות קטנות', "יחידה בינונית": 'יחידות בינוניות', "כפית": 'כפיות',
"כוס": 'כוסות', "כוס קצוץ": 'כוסות'}
if unit not in unit_dic:
return unit
if amount > 1:
unit_temp = unit_dic[unit].strip()
if unit_temp.count(' ') == 1:
return unit_temp
unit_temp = unit_temp.replace(' ', '')
unit_temp = unit_temp.replace(' ', '')
unit_temp = unit_temp[:unit_temp.find('ת') + 1] + ' ' + unit_temp[unit_temp.find('ת') + 1:]
# one word
if unit_temp.count('ת') == 1:
return unit_temp.strip()
return unit_temp
return unit
def get_calories(df_nutrition, items):
# calculating the cake diagram feature
# 1 gram fat is 9 calories
# 1 gram protein is 4 calories
# 1 gram carb is 4 calories
# item[0]-> food name
# item[1]-> serail number
# item[2]-> unit
# item[3]-> amount
# item[4]-> Weight
CARBS_GRAMS_CALOIRES = 4
PROTEIN_GRAMS_CALOIRES = 4
FAT_GRAMS_CALOIRES = 9
carbs = 0
protein = 0
fat = 0
count = 1
for _, item in items.items():
item_serail_number = str(item[1])
nutritional_value = item[3] * (item[4] / 100)
carbs_temp = df_nutrition[df_nutrition['smlmitzrach'] == item_serail_number]['carbs'].iloc[0]
protein_temp = df_nutrition[df_nutrition['smlmitzrach'] == item_serail_number]['protein'].iloc[0]
fat_temp = df_nutrition[df_nutrition['smlmitzrach'] == item_serail_number]['total_fat'].iloc[0]
carbs += carbs_temp * nutritional_value
protein += protein_temp * nutritional_value
fat += fat_temp * nutritional_value
count += 1
# calulate the Nutritional values of the meal
carbs = int(carbs)
protein = int(protein)
fat = int(fat)
calories = round(carbs * CARBS_GRAMS_CALOIRES + protein * PROTEIN_GRAMS_CALOIRES + fat * FAT_GRAMS_CALOIRES, 2)
return protein, fat, carbs, calories
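# --- Worked example for get_calories (sketch with invented per-100g values) -
# For a single item of 2 units at 50 g per unit, with 10 g carbs, 5 g protein
# and 3 g fat per 100 g, the macros and the 4/4/9 calorie total come out as:
def _example_meal_macros():
    amount, unit_weight = 2, 50.0
    portion = amount * (unit_weight / 100.0)                 # 1.0 "per-100g" portions
    carbs, protein, fat = int(10.0 * portion), int(5.0 * portion), int(3.0 * portion)
    calories = round(carbs * 4 + protein * 4 + fat * 9, 2)   # 87 kcal
    return protein, fat, carbs, calories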
def core_fun(meal_type, title=""):
global snacks, user_params, units_thr, type_thr, budget_weights_meals, budget_weights_snacks_fruits_fat, budget_weights_savoury_snacks, budget_weights_sweets, inputs, display_user_parameter, debug
global user_meals_num, total_cals, user_snacks_num, candidate_calories, scoring
global df_noa, df_tzameret_food_group, df_weights, df_nutrition
pd.set_option("display.precision", 2)
warnings.filterwarnings("ignore")
# Dictionary that is equivalent to user inputs and filters the df_noa Database based on the inputs
user_params = {'eggs': 'No', # If eggs = Yes, filters out all the meals with eggs
'vegetables': 'No', # If vegetables = Yes, fiters out all meals with vegetables
'fruits': 'No',
# If fruits = Yes, filters out all meals and snacks with fruits, and the Fruits category is not added to snacks
'dairy': 'No', # If dairy = Yes, filters out all the dairy items
'beef_chicken_fish': 'No',
# If beef_chicken_fish = Yes, filters out all the meals with beef chicken or fish
# For remaining if Yes, filters only the food its for (i.e if kosher = Yes, only shows kosher food)
'kosher': 'Yes',
'halal': 'Yes',
'vegetarian': 'No',
'vegan': 'No',
'ketogenic': 'No',
'paleo': 'No',
'mediterranean': 'No',
'lactose_free': 'No',
'gluten_free': 'No',
'milk_free': 'No',
'wheat_free': 'No',
'egg_free': 'No',
'soy_free': 'No',
'tree_nut_free': 'No',
'peanut_free': 'No',
'fish_free': 'No',
'shellfish_free': 'No'}
# Dictionary to see if want to add certain snack elements to the snacks on the scoreboard
snacks = {'sweets': 'No',
'Savoury_Snacks': 'Yes'}
# Threshold for build_meal to stop looking for another item in a category (e.g. once fewer Carb calories remain than the threshold, the meal moves on to Protein):
units_thr = {'Carbs': 25,
'Protein': 10,
'Vegetables': 10,
'Fat': 25,
'Fruits': 25,
'Sweets': 25,
'Savoury_Snacks': 25}
# Another threshold for build meal to stop looking for another item in the category if there is less budget than the threshold:
type_thr = {'Carbs': 25,
'Protein': 10,
'Vegetables': 10,
'Fat': 25,
'Fruits': 25,
'Sweets': 25,
'Savoury_Snacks': 25}
# For snacks it's either fruits and fat, or savoury snacks, or sweets
budget_weights_meals = {'Carbs': 0.4,
'Protein': 0.5,
'Vegetables': 0.2}
budget_weights_snacks_fruits_fat = {'Fruits': 0.7,
'Fat': 0.4}
budget_weights_savoury_snacks = {'Savoury_Snacks': 1.1}
budget_weights_sweets = {'Sweets': 1.1}
scoring = {'legacy': False, # legacy scoring system composed of budget utilization
'legacy_nut': True, # legacy scoring system with a bonus based on average nutritional density
'legacy_ene': False, # legacy scoring system with a bonus based on higher energy density
'legacy_nut_ene': False
# legacy scoring system with bonuses based on both nutrition density and energy density (higher density is better)
}
# User inputs that control different variables:
inputs = {'for_noa_gid': 2106834268,
# Gid that controls which for noa tab is shown, to switch just paste another Gid
'budget_var': 0.3, # Budget variable to see the weighting for snacks and individual meals
'item_thr': 4,
# Threshold used to decide when to stop building the meal once less than this much total budget is left
'max_items_meal': 4, # Max amount of items per meal
'max_items_snack': 2, # Max amount of items per snack
'penalty_weight': 1,
# Penalty weight for the meal score if the meal doesn't take the first option at the intersection of mida max amount meal
'nutrition_bonus': 0.1, # Bonus multiplier for the average nutrition density
'energy_bonus': 0.2, # Bonus multiplier for the average energy density
'meal_similarity_penalty': 0.3,
# Penalty for having multiple items of the same category in the same meal
'max_iter': 7, # Number of meals for each meal type in the scoreboard
'meals_num': 3, # Number of different meal types and meals - will always be 3
'snacks_num': 2, # number of snacks in the final scoreboard
'meat_egg_same_day_penalty': 0.2,
# Penalty if the top meal has eggs or meat and another meal the same day also has eggs or meat
'extra_penalty': 0.2, # Penalty if less than 0.7 of any category's budget is used
'meal_penalty_length': 0.1,
# Penalty weight applied for each item beyond 4 in a meal
'total_cals': 2000 # total calories in the budget for the day
}
debug = {'debug_en': True} # Debug flag; keep True so filter_meals_by_features actually applies the feature filters
# Toggle to show the user values in a DataFrame
display_user_parameter = {'display_user_parameter': False}
df_noa, df_tzameret_food_group, df_weights, df_nutrition = import_sheets(False)
df_main = build_meal_wrapper()
items, carbs, protein, fat = displayMeal(df_main, meal_type, inputs['max_items_meal'],
inputs['max_items_snack'], df_nutrition)
data = {'חלבון': protein,
'פחמימות': carbs,
'שומן': fat}
items_temp = items.split('\n')
items2 = ''
for line in items_temp:
if 'nan' not in line and 'nan nan nan' not in line:
if 'ארוחת' in line or 'סך' in line:
line = '\n' + line
items2 += line + '\n'
url = iniliatize_Diagram(title, data)
return items, url
def check_intent_key(tracker, max_iter=10):
list_intents = []
check_intent = ['nutrition_meal_question', 'nutrition_many_xy_in_food']
index = 1
for event in tracker.events:
if event.get("event") == "user":
intent = event.get("parse_data").get("intent").get("name")
list_intents.append(intent)
if index == max_iter:
break
for intent in reversed(list_intents):
if intent in check_intent:
return intent
return ''
# ------------------------------------------------------------------
class OtherOptions(Action):
def name(self) -> Text:
return "action_nutrition_other_options"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
intents_dict = {"nutrition_many_xy_in_food": "action_nutrition_many_xy_in_food",
"nutrition_meal_question": "action_nutrition_meal_question"}
next_action = None
previous_intent = None
intent = check_intent_key(tracker)
if intent == 'nutrition_meal_question':
previous_intent = intent
next_action = intents_dict[previous_intent]
if intent == 'nutrition_many_xy_in_food':
previous_intent = intent
next_action = intents_dict[previous_intent]
return [FollowupAction(next_action), SlotSet("previous_intent", previous_intent)]
# ------------------------------------------------------------------
class Actionnutritionalvalues(Action):
def name(self) -> Text:
return "action_nutritional_values_food"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
prediction = tracker.latest_message
entity = prediction['entities'][0]['value']
# db_dict = load_db_googleSheet(0x13)
db_dict = get_tables('0x13')
dietary_fiber_dict = {'food_energy': ['אנרגיה', 'קלוריות'],
'total_fat': ['שומן', 'גרם'],
'carbohydrates': ['פחמימות', 'גרם'],
'protein': ['חלבון ', 'גרם'],
'total_sugars': ['סוכר', 'גרם'],
'iron': ['ברזל', 'מ"ג'],
'calcium': ['סידן', 'מ"ג'],
'sodium': ['נתרן ', 'מ"ג'],
'total_dietary_fiber': ['סיבים תזונתיים', 'גרם'],
'vitamin_c': ['ויטמין סי', 'מ"ג'],
'vitamin_b12': ['ויטמין בי 12', 'מק"ג']}
db_df = db_dict['tzameret']
entity_name = db_dict['common_food'][db_dict['common_food'].index == entity]['shmmitzrach'].iloc[0]
res = '*ערכים תזונתיים של ' + entity_name + ' ל100 גרם ' + ':*\n'
smlmitzrach_number = int(
db_dict['common_food'][db_dict['common_food'].index == entity]['smlmitzrach'].iloc[0])
values = db_df[db_df['smlmitzrach'] == str(smlmitzrach_number)].iloc[0]
for dietary, list in dietary_fiber_dict.items():
value = values[dietary]
res += '- ' + list[0] + ': ' + str(round(value, 1)) + ' ' + list[1] + '\n'
res = res_timer(res, tracker)
except Exception as e:
res = 'אין לי מושג כמה, מצטער!'
res = res_error(res, tracker, e)
dispatcher.utter_message(res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class Actionnutritionmanyxyinfood(Action):
def name(self) -> Text:
return "action_nutrition_many_xy_in_food"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
user_intent = tracker.latest_message.get('intent').get('name')
db_dict = load_db_googleSheet(0x3)
# db_dict = get_tables('0x3')
lut = db_dict['lut']
tzameret = db_dict['tzameret']
user_msg = tracker.latest_message.get('text')
count = 1
iteation_count = 1
index = 1
food_groups_numbers_tzameret_dict = {}
start = 0
number_of_food = 35
food_list_babies = ['מטרנה', 'סימילק', 'תמ"ל', 'תמ"י', 'תמל', 'תמי', 'סימילאק', 'אבקה']
# get the indexes of the foods
for serail in tzameret['smlmitzrach']:
index += 1
if iteation_count == 1:
start = index
iteation_count += 1
else:
serail_temp = int(serail)
serail_temp += 1
serail_temp = str(serail_temp)
if serail_temp[0] != str(count):
end = index
food_groups_numbers_tzameret_dict[count] = [start, end]
count += 1
iteation_count = 1
                if serail_temp[0] == '8' or serail_temp[0] == '9':
break
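        # e.g. food_groups_numbers_tzameret_dict ends up shaped like {1: [start_row, end_row], 2: [...], ...},
        # mapping each tzameret food-group digit to its row range (row numbers here are illustrative only)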
try:
if user_intent != "nutrition_many_xy_in_food":
x = tracker.get_slot('x') if tracker.get_slot('x') else None
nut1_temp = x[0]
nut2_temp = x[1]
else:
prediction = tracker.latest_message
entity = prediction['entities'][0]['value']
# get the entity from the question
if entity is None:
if 'יש' in user_msg:
entity = user_msg[user_msg.find('יש') + 2:]
if 'הרבה' in user_msg:
entity = user_msg[user_msg.find('הרבה') + 4:]
for r in (("יש", ""), ("הרבה", ""), ("וגם", ""), (" ", " "), ("בהם", "")):
entity = entity.replace(*r).strip()
if ' ' in entity:
entity = entity.replace(' ', ' ')
if entity.count(' ') > 1:
                    parts = entity.split(' ')
                    if 'ויטמין' == parts[0]:
                        nut1 = parts[0] + ' ' + parts[1]
                        nut2 = parts[2]
                    if 'ויטמין' == parts[1]:
                        nut1 = parts[1] + ' ' + parts[2]
                        nut2 = parts[0]
else:
nut1, nut2 = entity.split(' ')
if nut2[0] == 'ו':
nut2 = nut2[1:]
nut1_temp = nut1.strip()
nut2_temp = nut2.strip()
# get the entity in english
nut1 = lut[lut.index == nut1_temp]['Entity'].values[0]
nut2 = lut[lut.index == nut2_temp]['Entity'].values[0]
tzameret = tzameret[['shmmitzrach', nut1, nut2]]
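            # randomly sample candidate foods from every food group, skip baby-formula items, and keep
            # only foods that contain both nutrients; 'sum' ranks foods rich in the two combined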
df = []
for i in range(1, len(food_groups_numbers_tzameret_dict) + 1):
for j in range(1, number_of_food):
index_temp = random.randint(food_groups_numbers_tzameret_dict[i][0],
food_groups_numbers_tzameret_dict[i][1])
db_food_temp = tzameret[tzameret.index == index_temp]
item_name = db_food_temp['shmmitzrach'].iloc[0]
if any(x in item_name for x in food_list_babies):
continue
else:
nut1_value = db_food_temp[nut1].values[0]
nut2_value = db_food_temp[nut2].values[0]
if nut1_value != 0 and nut2_value != 0:
db_food_temp.insert(3, 'sum', db_food_temp[nut1].values[0] + db_food_temp[nut2].values[0])
df.append(db_food_temp)
db_food = pd.concat(df)
db_food = db_food.drop_duplicates(subset='shmmitzrach')
db_food_nut1 = db_food.sort_values(by=[nut1], ascending=False).head(5)
db_food_nut2 = db_food.sort_values(by=[nut2], ascending=False).head(5)
db_food_nut1_nut2 = db_food.sort_values(by=['sum'], ascending=False).head(5)
res1 = ' במאכלים הבאים יש הרבה ' + nut1_temp + '\n'
res2 = ' במאכלים הבאים יש הרבה ' + nut2_temp + '\n'
res3 = ' במאכלים הבאים יש הרבה ' + nut1_temp + ' ו' + nut2_temp + '\n'
res1 += getcolums_FromDataFrame(db_food_nut1, 'shmmitzrach')
res2 += getcolums_FromDataFrame(db_food_nut2, 'shmmitzrach')
res3 += getcolums_FromDataFrame(db_food_nut1_nut2, 'shmmitzrach')
res = res1 + '\n\n' + res2 + '\n\n' + res3
res = res_timer(res, tracker)
except Exception as e:
res = "אין לי מושג כמה, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(res)
return [SlotSet("x", [nut1_temp, nut2_temp]), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class Actionhowmanyxyinz(Action):
def name(self) -> Text:
return "action_nutrition_howmanyxyinz"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
user_msg = tracker.latest_message.get('text')
two_nutrient = None
z = None
# db_dict = load_db_googleSheet(0x293)
db_dict = get_tables('0x293')
prediction = tracker.latest_message
two_nutrient = prediction['entities'][0]['value']
x, y = two_nutrient.split(' ו')
x = x.strip()
y = y.strip()
regex_res = re.search('כמה (.*) יש ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
                    x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
regex_res = re.search('כמה (.*) ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
                    x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
regex_res = re.search('מה הכמות של (.*) ב(.*)', user_msg.replace('?', ''))
if regex_res:
if two_nutrient is None:
                    x, y = regex_res.group(1).split(' ו')
x = x.strip()
y = y.strip()
z = regex_res.group(2)
y = y[:len(y)]
# get the units from the user message
user_msg_temp = user_msg[user_msg.find(two_nutrient) + len(two_nutrient) + 1:len(user_msg)].replace('?', '')
food1_units = "100 גרם"
regex_units_res1 = re.search('ב(.*) של', user_msg_temp)
regex_units_res2 = re.search(' (.*) של', user_msg_temp)
if regex_units_res1:
food1_units = regex_units_res1.group(1)
elif regex_units_res2:
food1_units = regex_units_res2.group(1)
if food1_units in db_dict['food_units_aliases']['Unit Alias'].values:
food1_units = db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food1_units][
'Zameret unit'].values[0]
val1, res1 = how_many_x_in_y_core(x, z, food1_units, self.name(), tracker)
val2, res2 = how_many_x_in_y_core(y, z, food1_units, self.name(), tracker)
res1 = checkDoublePattern(res1, 'קלוריות')
res2 = checkDoublePattern(res2, 'קלוריות')
res1 = checkPrecentinres(res1, x)
res2 = checkPrecentinres(res2, y)
res = ''
res += res1
res += "\n"
res += res2
res = res_timer(res, tracker)
except Exception as e:
res = "אין לי מושג כמה, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class Actioncompartiontwofoods(Action):
def name(self) -> Text:
return "action_nutrition_compare_foods"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
user_msg = tracker.latest_message.get('text')
entities = tracker.latest_message.get('entities')
x = None
y1 = None
y2 = None
more_or_less = 'יותר' if 'יותר' in user_msg else 'פחות'
# db_dict = load_db_googleSheet(0x293)
db_dict = get_tables('0x293')
for ent in entities:
if ent['entity'] in db_dict['lut']["action_nutrition_compare_foods"].values:
x = ent['value']
elif ent['entity'] in db_dict['lut']["action_nutrition_compare_foods"].values:
y1, y2 = ent['value'].split('או')
y1 = y1.strip()
y2 = y2.strip()
y1, y2 = user_msg[user_msg.find(x) + len(x):len(user_msg)].replace('?', '').split(' או ')
y1 = y1.strip()
y2 = y2.strip()
if '-' in user_msg:
y1 = y1[2:]
else:
y1 = y1[1:]
if 'בב' in y1 or (y1[0] == 'ב' and y1[1] != 'ב' and 'בשר' not in y1):
y1 = y1[1:len(y1)]
if 'בב' in y2 or (y2[0] == 'ב' and y2[1] != 'ב' and 'בשר' not in y2):
y2 = y2[1:len(y2)]
if not y1 or not y2:
regex_res = re.search('במה יש (פחות|יותר) .* ב(.*)', user_msg.replace('?', ''))
if regex_res:
more_or_less = regex_res.group(1)
y1, y2 = regex_res.group(2).split('או')
y1 = y1.strip()
y2 = y2.strip()
food1_units = "100 גרם"
food2_units = "100 גרם"
for k, y in enumerate([y1, y2]):
regex_units_res = re.search('(.*) של (.*)', y)
if regex_units_res:
if k == 0:
food1_units = regex_units_res.group(1)
y1 = regex_units_res.group(2)
else:
food2_units = regex_units_res.group(1)
y2 = regex_units_res.group(2)
if food1_units in db_dict['food_units_aliases']['Unit Alias'].values:
food1_units = \
db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food1_units][
'Zameret unit'].values[0]
if food2_units in db_dict['food_units_aliases']['Unit Alias'].values:
food2_units = \
db_dict['food_units_aliases'][db_dict['food_units_aliases']['Unit Alias'] == food2_units][
'Zameret unit'].values[0]
val1, res1 = how_many_x_in_y_core(x, y1, food1_units, self.name(), tracker)
val2, res2 = how_many_x_in_y_core(x, y2, food1_units, self.name(), tracker)
res1 = checkDoublePattern(res1, 'קלוריות')
res2 = checkDoublePattern(res2, 'קלוריות')
ys = (y1, y2)
vals = (val1, val2)
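            # pick the food with the larger (or smaller) amount of x, depending on whether the
            # user asked where there is 'more' or 'less' of it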
res = 'ב%s יש %s %s' % (ys[np.argmax(vals) if more_or_less == 'יותר' else np.argmin(vals)], more_or_less, x)
if 'ב ב' in res and 'בבשר' not in res:
res = res[1:]
res += "\n"
res += res1
res += "\n"
res += res2
res = res_timer(res, tracker)
except Exception as e:
res = "אין לי מושג כמה, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class Actionwhataboutx(Action):
def name(self) -> Text:
return "action_nutrition_and_what_about_x"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
# get the right actions according to the intent
intens_dict = {"nutrition_howmanyxiny": "action_nutrition_howmanyxiny",
"nutrition_meal_question": "action_nutrition_meal_question",
"nutrition_is_food_healthy": "action_nutrition_is_food_healthy",
"nutrition_get_rda": "action_nutrition_get_rda",
"nutrition_get_upper_limit": "action_nutrition_get_rda"}
user_messge = tracker.latest_message.get('text')
previous_intent = tracker.get_slot('previous_intent')
entity_value = None
slot = "x"
next_action = intens_dict[previous_intent]
# meal question
if previous_intent == "nutrition_meal_question":
return [FollowupAction(next_action),
SlotSet(slot, user_messge), SlotSet('previous_intent', 'nutrition_and_what_about_x')]
# ------------------------------------------------
entity = None
entity_value = None
# db_dict = load_db_googleSheet(0x2)
db_dict = get_tables('0x2')
lut_df = db_dict['lut']
nutrients = lut_df['Entity'].head(79)
# check if rasa detect the entity
if len(tracker.latest_message.get('entities')) != 0:
prediction = tracker.latest_message
entity_value = prediction['entities'][0]['value']
entity = prediction['entities'][0]['entity']
if entity_value is None:
if 'ברזל' in user_messge:
entity_value = 'ברזל'
entity = 'nutrient'
elif user_messge[0] == 'ו' and user_messge[1] != 'ב':
entity_value = user_messge[1:]
else:
entity_value = user_messge[2:]
if entity_value is None or entity_value == "":
entity_value = user_messge
# how many x in y
if previous_intent == "nutrition_howmanyxiny":
# rasa succeed to detect the entity
if entity is not None:
if entity == 'nutrient':
slot = "x"
else:
slot = "y"
# the entity value is taken from the user message
else:
if entity_value in nutrients:
slot = "x"
else:
slot = "y"
return [FollowupAction(next_action), SlotSet(slot, entity_value),
SlotSet('previous_intent', 'nutrition_and_what_about_x')]
return [FollowupAction(next_action),
SlotSet(slot, entity_value)]
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionWhatXCanBeInY(Action):
def name(self) -> Text:
return "action_nutrition_what_xcanbeiny"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
negative_words_list = ['ללא', 'לא', 'בלי', 'וללא']
entities_temp = {}
meal = ""
# db_dict = load_db_googleSheet(0x402)
db_dict = get_tables('0x402')
lut = db_dict['lut']
df_noa = db_dict['food_units_features']
message = tracker.latest_message.get('text')
# get the meal type
if 'בוקר' in message:
meal = "IL_Breakfast"
if 'צהריים' in message or 'צהרים' in message:
meal = "IL_Lunch"
if 'ערב' in message:
meal = 'IL_Dinner'
# get the entity value from the bot
prediction = tracker.latest_message
entities_list = prediction['entities']
if len(entities_list) == 1:
entity = entities_list[0]['value']
if ' ' not in entity:
entities = [entity]
filter_type = any(ele in message for ele in negative_words_list)
for index, ent in enumerate(entities):
entities_temp[index] = fliter_type(ent, lut, filter_type)
else:
entities_temp = get_entity_filters(entity, negative_words_list, lut)
else:
for index in range(len(entities_list)):
entity = entities_list[index]['value']
entities_temp_2 = get_entity_filters(entity, negative_words_list, lut)
entities_temp.update(entities_temp_2)
            # get the food according to the user selection
items = df_noa.loc[df_noa[meal] == 'Yes']
for key, value in entities_temp.items():
items = items.loc[items[value[0]] == value[1]]
            # randomly pick items from the filtered list (the loop below selects up to six)
indeX = items.index.tolist()
res = ""
for i in range(1, 7):
temp = random.randint(0, len(items) - 1)
res += str(i) + ". " + str(items[items.index == indeX[temp]]['Food_Name'].values[0]) + "\n"
res = res_timer(res, tracker)
dispatcher.utter_message(text=res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionMealQuestion(Action):
def name(self) -> Text:
return "action_nutrition_meal_question"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
meal = []
previous_intent = ""
message = None
words = ['בוקר', 'צהריים', 'ערב', 'יומי', 'יום', 'תפריט']
user_intent = tracker.latest_message.get('intent').get('name')
if user_intent != "nutrition_meal_question":
x = tracker.get_slot('x') if tracker.get_slot('x') else None
if x is not None:
                    if tracker.get_slot('previous_intent') == "nutrition_other_options" and any(
                            ele in x for ele in words):
message = x
else:
message = tracker.latest_message.get('text') if tracker.latest_message.get('text') else None
title = ''
if message is None:
# get the message from the user in the meal action
message = tracker.get_slot('x') if tracker.get_slot('x') else None
if message is not None:
if 'בוקר' in message:
meal = ['breakfast']
title = 'ארוחת בוקר'
elif 'צהריים' in message or 'צהרים' in message:
meal = ['lunch']
title = 'ארוחת צהריים'
elif 'ערב' in message:
meal = ['dinner']
title = 'ארוחת ערב'
elif 'יומי' in message or 'תפריט' in message or 'יום' in message or 'תפריט' in message:
meal = ['breakfast', 'lunch', 'dinner']
title = 'תפריט יומי'
res, url = core_fun(meal, title)
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res, image=url)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", title), SlotSet("y", None), SlotSet("previous_intent", "nutrition_meal_question")]
# ------------------------------------------------------------------
class ActionTimer(Action):
def name(self) -> Text:
return "action_timer"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
timer_state_str = "n/a"
for ent in tracker.latest_message.get('entities'):
if ent['entity'] == 'timer_state':
timer_state_str = ent['value']
break
if tracker.latest_message.get('intent').get('name') == 'timer_start':
timer_state_str = 'on'
dispatcher.utter_message(text="מצב הטיימר עודכן בהצלחה (%s)" % timer_state_str)
return [SlotSet("timer_state", timer_state_str)]
# ------------------------------------------------------------------
class ActionSimpleQuestion(Action):
def name(self) -> Text:
return "action_simple_question"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x6)
db_dict = get_tables('0x6')
lut_df = db_dict['lut']
user_intent = tracker.latest_message.get('intent').get('name')
simple_entity = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values and ent['value'] in lut_df['Entity']:
simple_entity = ent['value']
res = simpleQuestionAnswer(tracker, simple_entity, db_dict, user_intent)
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionGetRDAQuestion(Action):
def name(self) -> Text:
return "action_nutrition_get_rda"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
user_intent = tracker.latest_message.get('intent').get('name')
intent_upper = user_intent == 'nutrition_get_upper_limit'
previous_intent = tracker.get_slot('previous_intent') if tracker.get_slot('previous_intent') else None
if previous_intent == "nutrition_get_upper_limit" or previous_intent == "nutrition_get_rda":
intent = previous_intent
else:
intent = user_intent
rda_val, rda_units, rda_text, rda_status, nutrient, image = get_rda(self.name(), tracker,
intent_upper)
if rda_val > 0:
intent_upper_str = "המקסימלית" if intent_upper else "המומלצת"
res = "הקצובה היומית %s של %s %s היא\r %.2f %s" % \
(intent_upper_str, nutrient, get_personal_str(rda_status, tracker), rda_val, rda_units)
res += "\r"
res += rda_text if rda_text else ""
else:
if rda_text:
res = rda_text
else:
res = "אין לי מושג, מצטער!"
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res, image=image)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("previous_intent", intent), SlotSet("x", ""), SlotSet("y", "")]
# ------------------------------------------------------------------
class ActionNutritionHowManyXinY(Action):
def name(self) -> Text:
return "action_nutrition_howmanyxiny"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
db_dict = get_tables('0x293')
# db_dict = load_db_googleSheet(0x293)
df_tzameret_food_group = pd.read_csv(
"https://docs.google.com/spreadsheets/d/19rYDpki0jgGeNlKLPnINiDGye8QEfQ4IEEWSkLFo83Y/export?format=csv&gid=428717261")
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
units_df = db_dict['food_units']
units_aliases_df = db_dict['food_units_aliases']
y = None
x = None
user_msg = tracker.latest_message.get('text')
user_intent = tracker.latest_message.get('intent').get('name')
intent_upper = user_intent == 'nutrition_get_upper_limit'
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
# Fetch X and Y (from slots, from entities or from regex):
if tracker.get_slot('previous_intent') == 'nutrition_and_what_about_x':
x = tracker.get_slot('x') if tracker.get_slot('x') else None
if tracker.latest_message.get('entities') or tracker.get_slot('y'):
y = tracker.get_slot('y') if tracker.get_slot('y') else None
name_xy = self.name() + "_x"
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name() + "_x"].values:
x = ent['value']
name_xy = self.name() + "_x"
elif ent['entity'] in lut_df[self.name() + "_y"].values:
y = ent['value']
name_xy = self.name() + "_y"
regex_res = re.search('כמה (.*) יש ב(.*)', user_msg.replace('?', ''))
if regex_res:
x = regex_res.group(1)
y = regex_res.group(2).strip()
if not y:
regex_res = re.search('.* ב(.*)', user_msg.replace('?', ''))
if regex_res:
y = regex_res.group(1).strip()
if not y or not x:
user_msg_temp = user_msg[user_msg.find('כמה') + 3:]
regex_res = re.search('(.*) ב(.*)', user_msg_temp.replace('?', ''))
if regex_res:
x = regex_res.group(1).strip()
y = regex_res.group(2).strip()
food_units = "גרם"
regex_units_res = re.search('(.*) של (.*)', y) if y else None
if regex_units_res:
food_units = regex_units_res.group(1)
y = regex_units_res.group(2)
if food_units in units_aliases_df['Unit Alias'].values:
food_units = units_aliases_df[units_aliases_df['Unit Alias'] == food_units]['Zameret unit'].values[
0]
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
y_common = y
if y in common_df.index:
y_common = common_df[common_df.index == y]['shmmitzrach'][0]
else:
y_food = ' '.join(y.split(' ')[1:])
food_units = units_aliases_df[units_aliases_df['Unit Alias'] == y.split(' ')[0]]['Zameret unit']
if food_units.empty:
food_units = y.split(' ')[0]
else:
food_units = food_units.values[0]
if y_food in common_df.index:
y_common = common_df[common_df.index == y_food]['shmmitzrach'][0]
else:
y_common = y_food
food = db_df[db_df['shmmitzrach'].str.contains(y_common)].iloc[0, :]
feature = lut_df[lut_df.index == x]["Entity"][0]
units = lut_df[lut_df.index == x]["Units"][0]
first_digit_in_smlmitzrach = int(food['smlmitzrach'][0])
mida = \
df_tzameret_food_group[df_tzameret_food_group['ספרה ראשונה בקוד'] == first_digit_in_smlmitzrach].iloc[
0][
'mida_maxAmount_meal']
mida = re.sub('_[123456789]', '', mida)
mida = mida.replace(' ', '')
mida = mida.split(',')
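            # mida holds the serving-size codes allowed for this food group; the first code found in the
            # units table gives the serving name (shmmida) and its weight in grams (mishkal)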
units_df = units_df[units_df['smlmitzrach'] == int(food['smlmitzrach'])]
shmmida = ''
mishkal = ''
for i in mida:
if int(i) in units_df['mida'].values:
shmmida = units_df[units_df['mida'] == int(i)]['shmmida'].iloc[0]
mishkal = units_df[units_df['mida'] == int(i)]['mishkal'].iloc[0]
food_units_factor = int(mishkal) / 100
fat_calories = round(food['total_fat'] * food_units_factor, 2)
protein_calories = round(food['protein'] * food_units_factor, 2)
carbs_calories = round(food['carbohydrates'] * food_units_factor, 2)
data = {'חלבון': protein_calories,
'פחמימה': carbs_calories,
'שומן': fat_calories}
if x == 'קלוריות':
val = fat_calories * 9 + protein_calories * 4 + carbs_calories * 4
else:
val = food[feature] * food_units_factor
val = round(val, 2)
food_units = "גרם"
mishkal = int(mishkal)
res = 'ב' + shmmida + ' (' + str(int(mishkal)) + ' ' + food_units + ')' + ' של ' + y_common + ' יש '
            # reverse the digits so the weight renders correctly in the right-to-left chart title
            mishkal = str(mishkal)[::-1]
title = 'ב' + shmmida + ' )' + mishkal + ' ' + food_units + '(' + ' של ' + y_common + ' יש '
title = title[:title.find(',')]
res += str(val) + ' ' + units + ' ' + x
rda_val, rda_units, rda_text, rda_status, nutrient, x_1 = get_rda(name_xy, tracker, intent_upper)
if rda_val > 0 and units not in ['יחב"ל']:
rda = 100 * float(val) / rda_val
intent_upper_str = "המקסימלית" if intent_upper else "המומלצת"
res += "\r"
res += "שהם כ-%d אחוז מהקצובה היומית %s %s" % (
int(rda), intent_upper_str, get_personal_str(rda_status, tracker))
res += "\r"
res += rda_text if rda_text else ""
res = checkDoublePattern(res, 'קלוריות')
res = res_timer(res, tracker)
res = checkPrecentinres(res, x)
if ' ' in y.strip():
title = 'ב' + y
else:
title = re.sub('[1234567890%)(]', '', title)
title = title.replace('גרם', '')
title = title.replace('גרמים', '')
title = title.replace('יש', '')
title = title[title.find('של') + 2:]
title = re.sub(' ', '', title)
title = title.strip()
title = 'ב' + title
title = title.strip()
url = iniliatize_Diagram(title, data)
dispatcher.utter_message(text="%s" % res, image=url)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", x), SlotSet("y", y), SlotSet("previous_intent", "nutrition_howmanyxiny")]
# ------------------------------------------------------------------
class ActionIsFoodHealthyQuestion(Action):
def name(self) -> Text:
return "action_nutrition_is_food_healthy"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x33)
db_dict = get_tables('0x33')
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
food = ""
food_entity = ""
x = tracker.get_slot('x') if tracker.get_slot('x') else None
            if x is not None and x != "":
food = x
food_entity = x
else:
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
food = food_entity
break
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
_, nutrition_density_res = get_food_nutrition_density(food, food_ranges_df)
advantages = []
disadvantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"]:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
elif row["good_or_bad"] == "bad":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["High - threshold per 100gr"])
if value > threshold:
disadvantages.append(row["hebrew_name"])
nutrition_density_normalized = float(food["Nutrition density normalized"])
if nutrition_density_res == "low":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) נמוכה" % food_entity
elif nutrition_density_res == "med":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) בינונית" % food_entity
elif nutrition_density_res == "high":
res = "ב%s יש צפיפות תזונתית (רכיבים תזונתיים טובים ביחס לקלוריות) גבוהה" % food_entity
if disadvantages:
res += ". "
res += "החסרונות של %s הם הרבה %s" % (food_entity, ", ".join(disadvantages))
if advantages:
res += ". "
res += "היתרונות של %s הם הרבה %s" % (food_entity, ", ".join(advantages))
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("previous_intent", "nutrition_is_food_healthy"), SlotSet("x", ""), SlotSet("y", "")]
# ------------------------------------------------------------------
class ActionWhatIsHealthierQuestion(Action):
def name(self) -> Text:
return "action_nutrition_what_is_healthier"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x33)
db_dict = get_tables('0x33')
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
user_msg = tracker.latest_message.get('text')
food_entity1 = None
food_entity2 = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name() + "_x"].values:
food_entity1 = ent['value']
elif ent['entity'] in lut_df[self.name() + "_y"].values:
food_entity2 = ent['value']
if not food_entity2:
regex_res = re.search('.* או (.*)', user_msg.replace('?', ''))
if regex_res:
food_entity2 = regex_res.group(1).strip()
nutrition_density_cmp = []
advantages_cmp = []
disadvantages_cmp = []
for food_entity in (food_entity1, food_entity2):
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
nutrition_density, _ = get_food_nutrition_density(food, food_ranges_df)
advantages = []
disadvantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"]:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
elif row["good_or_bad"] == "bad":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["High - threshold per 100gr"])
if value > threshold:
disadvantages.append(row["hebrew_name"])
nutrition_density_cmp.append(float(food["Nutrition density normalized"]))
if disadvantages:
res_temp = '*החסרונות של ' + food_entity + '*\n'
res_temp += "%s" % (", ".join(disadvantages))
disadvantages_cmp.append(res_temp + '\n\n')
if advantages:
res_temp = '*היתרונות של ' + food_entity + '*\n'
res_temp += "%s" % (", ".join(advantages))
advantages_cmp.append(res_temp + '\n\n')
if nutrition_density_cmp[0] > nutrition_density_cmp[1]:
res_temp = "לפי צפיפות תזונתית %s עדיף על פני %s\r" % (food_entity1, food_entity2)
elif nutrition_density_cmp[0] < nutrition_density_cmp[1]:
res_temp = "לפי צפיפות תזונתית %s עדיף על פני %s\r" % (food_entity2, food_entity1)
else:
res_temp = "לפי צפיפות תזונתית %s ו-%s שקולים\r" % (food_entity1, food_entity2)
if nutrition_density_cmp[0] < nutrition_density_cmp[1]:
advantages_cmp.reverse()
disadvantages_cmp.reverse()
res = res_temp
res += '\n\n\n'
for advantage in advantages_cmp:
if advantage:
res += "%s\n\r" % advantage
for disadvantage in disadvantages_cmp:
if disadvantage:
res += "%s\n\r" % disadvantage
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionWhatIsRecommendedQuestion(Action):
def name(self) -> Text:
return "action_nutrition_is_food_recommended"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x3b)
db_dict = get_tables('0x3b')
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
food_qna_df = db_dict['food_qna']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
break
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
_, nutrition_density_res = get_food_nutrition_density(food, food_ranges_df)
_, nutrition_energy_res = get_food_energy_density(food, food_ranges_df)
description_density_row = food_qna_df[(food_qna_df.index == nutrition_density_res) &
(food_qna_df.energy_density == nutrition_energy_res)]
res = description_density_row['description_density'].values[0]
res = res.replace('var#food', food_entity)
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionEatBeforeTrainingQuestion(Action):
def name(self) -> Text:
return "action_eat_before_training"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x10)
db_dict = get_tables('0x10')
custom_df = db_dict['common_food']
user_intent = tracker.latest_message.get('intent').get('name')
training_type = tracker.get_slot("training_type")
training_duration = tracker.get_slot("training_duration")
if training_type == 'ריצת אינטרוולים':
if training_duration:
res = custom_df['Entity'][training_type + ' מעל ' + training_duration][0]
else:
res = custom_df['Entity'][training_type][0]
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionBloodtestGenericQuestion(Action):
def name(self) -> Text:
return "action_nutrition_bloodtest_generic"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x102)
db_dict = get_tables('0x102')
lut_df = db_dict['lut']
bloodtest_df = db_dict['bloodtest_vals']
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
bloodtest_entity = ent['value']
break
feature = db_dict['lut']['Entity'][bloodtest_entity]
gender_str = "Male"
if tracker.get_slot('gender') == "זכר":
gender_str = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender_str = "Female"
age = float(tracker.get_slot('age') if tracker.get_slot('age') else "40")
bloodtest_row = bloodtest_df[(bloodtest_df['Element'] == feature) & \
((bloodtest_df['Gender'] == "ANY") | (
bloodtest_df['Gender'] == gender_str)) & \
((bloodtest_df['Age min'] == "ANY") | (
bloodtest_df['Age min'].replace('ANY', -1).astype(float) <= age)) & \
((bloodtest_df['Age Max'] == "ANY") | (
bloodtest_df['Age Max'].replace('ANY', -1).astype(float) > age))]
bloodtest_type = int(bloodtest_row['Graph type'].values[0])
bloodtest_min = bloodtest_row['Min'].values[0]
bloodtest_thr1 = bloodtest_row['Threshold 1'].values[0]
bloodtest_thr2 = bloodtest_row['Threshold 2'].values[0]
bloodtest_max = bloodtest_row['Max'].values[0]
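            # Graph type 1: normal range [Min, Threshold 1], values above Threshold 2 are abnormal;
            # type 2: normal range [Threshold 2, Max], values below Threshold 1 are abnormal;
            # type 3: normal range [Threshold 1, Threshold 2]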
if bloodtest_type == 1:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f, ערך מעל %.2f נחשב חריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
elif bloodtest_type == 2:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f, ערך מתחת %.2f נחשב חריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
res = 'ערך תקין עבור בדיקת %s בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionBloodtestValueQuestion(Action):
def name(self) -> Text:
return "action_nutrition_bloodtest_value"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0x102)
db_dict = get_tables('0x102')
lut_df = db_dict['lut']
bloodtest_df = db_dict['bloodtest_vals']
            user_msg = tracker.latest_message.get('text')
            val = None
            bloodtest_entity = None
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in [x for x in lut_df[self.name()].values if x != 0]:
if ent['entity'] == 'integer':
val = ent['value']
else:
bloodtest_entity = ent['value']
if not val:
regex_res = re.search('האם (.*) הוא .*', user_msg.replace('?', ''))
if regex_res:
val = regex_res.group(1).strip()
if not val:
raise Exception()
feature = db_dict['lut']['Entity'][bloodtest_entity]
gender_str = "Male"
if tracker.get_slot('gender') == "זכר":
gender_str = "Male"
elif tracker.get_slot('gender') == "נקבה":
gender_str = "Female"
age = float(tracker.get_slot('age') if tracker.get_slot('age') else "40")
bloodtest_row = bloodtest_df[(bloodtest_df['Element'] == feature) & \
((bloodtest_df['Gender'] == "ANY") | (
bloodtest_df['Gender'] == gender_str)) & \
((bloodtest_df['Age min'] == "ANY") | (
bloodtest_df['Age min'].replace('ANY', -1).astype(float) <= age)) & \
((bloodtest_df['Age Max'] == "ANY") | (
bloodtest_df['Age Max'].replace('ANY', -1).astype(float) > age))]
bloodtest_type = int(bloodtest_row['Graph type'].values[0])
bloodtest_min = bloodtest_row['Min'].values[0]
bloodtest_thr1 = bloodtest_row['Threshold 1'].values[0]
bloodtest_thr2 = bloodtest_row['Threshold 2'].values[0]
bloodtest_max = bloodtest_row['Max'].values[0]
if bloodtest_type == 1:
if bloodtest_min <= float(val) <= bloodtest_thr1:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מעל %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_min, bloodtest_thr1, bloodtest_thr2)
elif bloodtest_type == 2:
if bloodtest_thr2 <= float(val) <= bloodtest_max:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f. ערך מתחת %.2f נחשב לחריג' % (
bloodtest_entity, bloodtest_thr2, bloodtest_max, bloodtest_thr1)
elif bloodtest_type == 3:
if bloodtest_thr1 <= float(val) <= bloodtest_thr2:
res = 'כן, זהו ערך תקין עבור בדיקת %s היות והוא נופל בטווח בין %.2f ועד %.2f' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
res = 'לא, זהו אינו ערך תקין עבור בדיקת %s. ערך תקין הינו בטווח בין %.2f ועד %.2f.' % (
bloodtest_entity, bloodtest_thr1, bloodtest_thr2)
else:
raise Exception()
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionFoodSubstituteQuestion(Action):
def name(self) -> Text:
return "action_nutrition_food_substitute"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
tic()
# db_dict = load_db_googleSheet(0xc33)
db_dict = get_tables('0xc33')
db_df = db_dict['tzameret']
lut_df = db_dict['lut']
features_df = db_dict['food_units_features']
common_df = db_dict['common_food']
food_ranges_df = db_dict['food_ranges']
subs_tags_alias_df = db_dict['subs_tags_alias']
features_df = features_df.drop(index=0)
user_msg = tracker.latest_message.get('text')
food_entity = ""
for ent in tracker.latest_message.get('entities'):
if ent['entity'] in lut_df[self.name()].values:
food_entity = ent['value']
break
if food_entity == "" or food_entity is None:
prediction = tracker.latest_message
food_entity = prediction['entities'][0]['value']
tzameret_groups_lut = {}
tzameret_groups_lut['1'] = ['1', '4'] # Milk
tzameret_groups_lut['2'] = ['1', '2', '3', '4'] # Meat
tzameret_groups_lut['3'] = ['1', '2', '3', '4'] # Eggs
tzameret_groups_lut['4'] = ['1', '4'] # Dairy
tzameret_groups_lut['5'] = ['5', '6', '7', '9'] # Snacks
tzameret_groups_lut['6'] = ['5', '6', '7', '9'] # Fruits
tzameret_groups_lut['7'] = ['5', '6', '7', '9'] # Vegetables
tzameret_groups_lut['8'] = ['8', '4'] # Fat
tzameret_groups_lut['9'] = ['5', '6', '7', '9'] # Beverages
food_energy_thr = 0.05
def get_advantages(food):
advantages = []
for idx, row in food_ranges_df.iterrows():
if row["tzameret_name"] and row["tzameret_name"] in food:
if row["good_or_bad"] == "good":
value = float(food[row["tzameret_name"]])
if idx == "Protein":
threshold = 250
else:
threshold = float(row["Medium - threshold per 100gr"])
if value > threshold:
advantages.append(row["hebrew_name"])
return advantages
def get_advantages_score(food):
act = food['advantages']
ref = ast.literal_eval(food['advantages_ref'])
intersection = []
if isinstance(act, list) and isinstance(ref, list):
intersection = list(set(act) & set(ref))
return len(intersection)
food = food_entity
if food in common_df.index:
food = common_df[common_df.index == food]['shmmitzrach'][0]
food_tzameret = db_df[db_df['shmmitzrach'].str.contains(food)].iloc[0, :]
tzameret_code = int(food_tzameret['smlmitzrach'])
tzameret_code_msb = food_tzameret['smlmitzrach'][0]
food_energy = food_tzameret['food_energy']
food_features = features_df[features_df['smlmitzrach'].fillna(0).astype(int) == tzameret_code]
user_msg_feature_v = []
user_msg_feature_k = list(
set(subs_tags_alias_df.index.to_list()) & set(user_msg.replace(',', '').split(" ")))
for tag in user_msg_feature_k:
tag_df = subs_tags_alias_df[subs_tags_alias_df.index == tag]['Entity']
                if tag_df.any():
user_msg_feature_v.append(tag_df.values[0])
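            # candidate substitutes: same tzameret food-group family and food energy within ~5% of the original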
food_filter_1 = db_df[db_df['smlmitzrach'].str[0].isin(tzameret_groups_lut[tzameret_code_msb])]
food_filter_2 = db_df[abs(db_df['food_energy'] - food_energy) / food_energy < food_energy_thr]
food_filter_1_2 = pd.merge(food_filter_1, food_filter_2, how='inner')
food_filter_1_2['smlmitzrach'] = food_filter_1_2['smlmitzrach'].astype(float)
features_df['smlmitzrach'] = features_df['smlmitzrach'].astype(float)
food_filter = features_df[features_df['smlmitzrach'].isin(food_filter_1_2['smlmitzrach'].to_list())]
food_filter = food_filter[~food_filter['Food_Name'].str.contains(food_entity)]
for tag in user_msg_feature_v:
food_filter = food_filter[food_filter[tag] == 'Yes']
food_filter = food_filter.reset_index(drop=True)
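            # score each candidate by how many boolean features it shares with the original food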
if food_features.empty:
food_filter['features_score'] = 0
else:
food_features_compact = food_features.iloc[:, 5:-4]
food_filter_compact = food_filter.iloc[:, 5:-4].reset_index(drop=True)
food_features_compact_shaped = pd.DataFrame(
np.repeat(food_features_compact.values, len(food_filter_compact), axis=0))
food_features_compact_shaped.reset_index(drop=True)
food_features_compact_shaped.columns = food_features_compact.columns
food_features_score_df = (food_filter_compact == food_features_compact_shaped).astype(int)
food_filter['features_score'] = food_features_score_df.sum(axis=1)
food_advantages = get_advantages(food_tzameret)
food_filter['advantages'] = food_filter_1_2.apply(get_advantages, axis=1)
food_filter['advantages_ref'] = str(food_advantages)
food_filter['advantages_score'] = food_filter.apply(get_advantages_score, axis=1)
food_filter = food_filter.sort_values(['features_score', 'advantages_score'], ascending=False)
res = "להלן 5 התחליפים הקרובים ביותר עבור %s" % food_entity
res += "\n"
res += '\n'.join(list(food_filter['Food_Name'].values[:5]))
res = res_timer(res, tracker)
dispatcher.utter_message(text="%s" % res)
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionPersonalizationList(Action):
def name(self) -> Text:
return "action_personlization_list"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
dispatcher.utter_message(text="%s" % df.to_string())
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ActionPersonalizationRemove(Action):
def name(self) -> Text:
return "action_personlization_remove"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
try:
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
phone_slot = tracker.get_slot("phone")
if phone_slot in df.index:
df = df.drop(tracker.get_slot("phone"))
df.to_pickle(pkl_db)
dispatcher.utter_message(text="רישומך הוסר מן המערכת")
else:
dispatcher.utter_message(text="אינך מופיע במערכת, לכן אין צורך בהסרת רישום")
except Exception as e:
res = "אין לי מושג, מצטער!"
res = res_error(res, tracker, e)
dispatcher.utter_message(text=res)
return [SlotSet("x", None), SlotSet("y", None), SlotSet("previous_intent", None)]
# ------------------------------------------------------------------
class ProfileFormValidator(FormValidationAction):
"""ProfileForm Validator"""
def name(self) -> Text:
return "validate_profile_form"
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
async def required_slots(
self,
slots_mapped_in_domain: List[Text],
dispatcher: "CollectingDispatcher",
tracker: "Tracker",
domain: "DomainDict",
) -> Optional[List[Text]]:
required_slots = ["phone", "username", "gender", "age", "weight", "height"]
return required_slots
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
return {
"phone": [
self.from_entity(entity="integer", role="phone"),
self.from_entity(entity="integer"),
self.from_text(),
],
"username": [
self.from_entity(entity="name"),
self.from_text(),
],
"gender": [
self.from_entity(entity="gender"),
],
"age": [
self.from_entity(entity="integer", role="age"),
self.from_entity(entity="integer"),
self.from_text(),
],
"weight": [
self.from_entity(entity="integer", role="weight"),
self.from_entity(entity="integer"),
self.from_text(),
],
"height": [
self.from_entity(entity="integer", role="height"),
self.from_entity(entity="integer"),
self.from_text(),
],
}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_phone(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate phone value."""
requested_slot = tracker.get_slot("requested_slot")
phone_slot = tracker.get_slot("phone")
phone_value = None
if requested_slot == "phone":
phone_value = value.replace('-', '').replace(' ', '')
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = pd.read_pickle(pkl_db)
if phone_value in df.index:
dispatcher.utter_message(
text="פרטיך נטענו בהצלחה, ברוכים השבים %s" % df.loc[phone_value].username)
return {'phone': phone_value,
'username': df.loc[phone_value].username,
'gender': df.loc[phone_value].gender,
'age': df.loc[phone_value].age,
'weight': df.loc[phone_value].weight,
'height': df.loc[phone_value].height}
else:
df = pd.DataFrame(columns=["username", "gender", "age", "weight", "height"])
df.to_pickle(pkl_db)
elif phone_slot:
phone_value = phone_slot
return {"phone": phone_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_username(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate username value."""
requested_slot = tracker.get_slot("requested_slot")
username_slot = tracker.get_slot("username")
username_value = None
if requested_slot == "username":
username_value = value
elif username_slot:
username_value = username_slot
return {"username": username_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_gender(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate gender value."""
requested_slot = tracker.get_slot("requested_slot")
gender_slot = tracker.get_slot("gender")
gender_value = None
if requested_slot == "gender":
gender_value = value
elif gender_slot:
gender_value = gender_slot
return {"gender": gender_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_age(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate age value."""
requested_slot = tracker.get_slot("requested_slot")
age_slot = tracker.get_slot("age")
age_value = None
if requested_slot == "age":
age_value = value
elif age_slot:
age_value = age_slot
return {"age": age_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_weight(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate weight value."""
requested_slot = tracker.get_slot("requested_slot")
weight_slot = tracker.get_slot("weight")
weight_value = None
if requested_slot == "weight":
weight_value = value
elif weight_slot:
weight_value = weight_slot
return {"weight": weight_value}
# -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
def validate_height(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate height value."""
requested_slot = tracker.get_slot("requested_slot")
height_slot = tracker.get_slot("height")
height_value = None
if requested_slot == "height":
height_value = value
pkl_db = './persons.pkl'
if path.exists(pkl_db):
df = | pd.read_pickle(pkl_db) | pandas.read_pickle |
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements adapter for pmdarima forecasters to be used in sktime framework."""
__author__ = ["mloning", "hyang1996", "kejsitake", "fkiraly"]
__all__ = ["_PmdArimaAdapter"]
import pandas as pd
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
class _PmdArimaAdapter(BaseForecaster):
"""Base class for interfacing pmdarima."""
_tags = {
"ignores-exogeneous-X": False,
"capability:pred_int": True,
"requires-fh-in-fit": False,
"handles-missing-data": True,
}
def __init__(self):
self._forecaster = None
super(_PmdArimaAdapter, self).__init__()
def _instantiate_model(self):
raise NotImplementedError("abstract method")
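    # Example (sketch only): a concrete subclass just has to build the pmdarima estimator.
    # Hypothetical illustration, assuming pmdarima is installed:
    #
    #     class _AutoARIMAExample(_PmdArimaAdapter):
    #         def _instantiate_model(self):
    #             from pmdarima.arima import AutoARIMA
    #             return AutoARIMA(suppress_warnings=True)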
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
        fh : int, list, np.array or ForecastingHorizon, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
self._forecaster = self._instantiate_model()
self._forecaster.fit(y, X=X)
return self
def _predict(self, fh, X=None):
"""Make forecasts.
Parameters
----------
        fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
# distinguish between in-sample and out-of-sample prediction
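        # e.g. with fh = [-2, -1, 1, 2] relative to the cutoff, [-2, -1] is predicted in-sample
        # and [1, 2] out-of-sample, and the two prediction series are appended below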
fh_oos = fh.to_out_of_sample(self.cutoff)
fh_ins = fh.to_in_sample(self.cutoff)
# all values are out-of-sample
if fh.is_all_out_of_sample(self.cutoff):
return self._predict_fixed_cutoff(fh_oos, X=X)
# all values are in-sample
elif fh.is_all_in_sample(self.cutoff):
return self._predict_in_sample(fh_ins, X=X)
# both in-sample and out-of-sample values
else:
y_ins = self._predict_in_sample(fh_ins, X=X)
y_oos = self._predict_fixed_cutoff(fh_oos, X=X)
return y_ins.append(y_oos)
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Generate in sample predictions.
Parameters
----------
        fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
if hasattr(self, "order"):
diff_order = self.order[1]
else:
diff_order = self._forecaster.model_.order[1]
# Initialize return objects
fh_abs = fh.to_absolute(self.cutoff).to_numpy()
fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
y_pred = pd.Series(index=fh_abs)
        # for in-sample predictions, pmdarima requires zero-based integer indices
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
if start < 0:
            # cannot forecast earlier than the training start point
            raise ValueError("Can't make predictions earlier than the training start point")
        elif start < diff_order:
            # cannot forecast earlier than the ARIMA differencing order,
            # but we return NaN for these supposedly forecastable points
start = diff_order
if end < start:
# since we might have forced `start` to surpass `end`
end = diff_order
            # get rid of unforecastable points
fh_abs = fh_abs[fh_idx >= diff_order]
# reindex accordingly
fh_idx = fh_idx[fh_idx >= diff_order] - diff_order
result = self._forecaster.predict_in_sample(
start=start,
end=end,
X=X,
return_conf_int=False,
alpha=DEFAULT_ALPHA,
)
if return_pred_int:
pred_ints = []
for a in alpha:
pred_int = pd.DataFrame(index=fh_abs, columns=["lower", "upper"])
result = self._forecaster.predict_in_sample(
start=start,
end=end,
X=X,
return_conf_int=return_pred_int,
alpha=a,
)
pred_int.loc[fh_abs] = result[1][fh_idx, :]
pred_ints.append(pred_int)
# unpack results
y_pred.loc[fh_abs] = result[0][fh_idx]
return y_pred, pred_ints
else:
y_pred.loc[fh_abs] = result[fh_idx]
return y_pred
def _predict_fixed_cutoff(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make predictions out of sample.
Parameters
----------
        fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
n_periods = int(fh.to_relative(self.cutoff)[-1])
result = self._forecaster.predict(
n_periods=n_periods,
X=X,
return_conf_int=False,
alpha=DEFAULT_ALPHA,
)
fh_abs = fh.to_absolute(self.cutoff)
fh_idx = fh.to_indexer(self.cutoff)
if return_pred_int:
pred_ints = []
for a in alpha:
result = self._forecaster.predict(
n_periods=n_periods,
X=X,
return_conf_int=True,
alpha=a,
)
pred_int = result[1]
pred_int = pd.DataFrame(
pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"]
)
pred_ints.append(pred_int)
return result[0], pred_ints
else:
return | pd.Series(result[fh_idx], index=fh_abs) | pandas.Series |
#!/usr/bin/env python3
"""Script to get the classification performance."""
import argparse
from pathlib import Path
import random as rn
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from joblib import load
from utils import COLUMNS_NAME, load_dataset
PROJECT_ROOT = Path.cwd()
def main(dataset_name, disease_label, evaluated_dataset):
"""Calculate the performance of the classifier in each iteration of the bootstrap method."""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
participants_path = PROJECT_ROOT / 'data' / evaluated_dataset / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / evaluated_dataset / 'freesurferData.csv'
outputs_dir = PROJECT_ROOT / 'outputs'
ids_path = outputs_dir / (evaluated_dataset + '_homogeneous_ids.csv')
hc_label = 1
# ----------------------------------------------------------------------------
# Set random seed
random_seed = 42
np.random.seed(random_seed)
rn.seed(random_seed)
classifier_dir = PROJECT_ROOT / 'outputs' / 'classifier_analysis'
classifier_dataset_dir = classifier_dir / dataset_name
classifier_dataset_analysis_dir = classifier_dataset_dir / '{:02d}_vs_{:02d}'.format(hc_label, disease_label)
classifier_storage_dir = classifier_dataset_analysis_dir / 'models'
generalization_dir = classifier_dataset_analysis_dir / 'generalization'
generalization_dir.mkdir(exist_ok=True)
evaluated_dataset_df = load_dataset(participants_path, ids_path, freesurfer_path)
aucs_test = []
# ----------------------------------------------------------------------------
for i_bootstrap in tqdm(range(n_bootstrap)):
rvm = load(classifier_storage_dir / '{:03d}_rvr.joblib'.format(i_bootstrap))
scaler = load(classifier_storage_dir / '{:03d}_scaler.joblib'.format(i_bootstrap))
x_data = evaluated_dataset_df[COLUMNS_NAME].values
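        # normalise each regional volume by the estimated total intracranial volume (TIV)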
tiv = evaluated_dataset_df['EstimatedTotalIntraCranialVol'].values
tiv = tiv[:, np.newaxis]
x_data = (np.true_divide(x_data, tiv)).astype('float32')
x_data = np.concatenate((x_data[evaluated_dataset_df['Diagn'] == hc_label],
x_data[evaluated_dataset_df['Diagn'] == disease_label]), axis=0)
y_data = np.concatenate((np.zeros(sum(evaluated_dataset_df['Diagn'] == hc_label)),
np.ones(sum(evaluated_dataset_df['Diagn'] == disease_label))))
# Scaling using inter-quartile
x_data = scaler.transform(x_data)
pred = rvm.predict(x_data)
predictions_proba = rvm.predict_proba(x_data)
auc = roc_auc_score(y_data, predictions_proba[:, 1])
aucs_test.append(auc)
aucs_df = | pd.DataFrame(columns=['AUCs'], data=aucs_test) | pandas.DataFrame |
from math import nan
import os
import numpy as np
import pandas as pd
import warnings
from scipy import signal as scisig
from numpy import matlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.stats import binom
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.patches as mplpatches
import matplotlib.ticker as ticker
from matplotlib.ticker import LinearLocator
import matplotlib.lines as lines
import matplotlib.transforms as transforms
import string
import pdb
import seaborn as sns
import sys
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore")
def unique_py(seqlist):
seen = set()
seen_add = seen.add
return [x for x in seqlist if not (x in seen or seen_add(x))]
##COMPUTATION OF INTERMUTATIONAL DISTANCE
# check computation of IMD
# the major difference is whether it's the closest breakpoint or the breakpoint immediately preceding it
def calcIntermutDist2(subs_type,first_chrom_na=False):
subs_type_processed = subs_type.copy()
chr_list = unique_py(subs_type['chr'])
pos_array_im = subs_type['position'].values
index_orig_df = np.arange(len(subs_type_processed))
#args_pos_list = np.argsort(pos_array_im)
args_pos_list=[]
distPrev_list =[]
prevPos_list =[]
for c in chr_list:
inds_chr = np.where(subs_type['chr']==c)
pos_array_im_c = np.sort(pos_array_im[inds_chr])
index_orig_df[inds_chr] = index_orig_df[inds_chr][np.argsort(pos_array_im[inds_chr])]
if first_chrom_na:
prevPos_arr_c = np.hstack((np.NAN,pos_array_im_c.flatten()[:-1]))
else:
prevPos_arr_c = np.hstack((0,pos_array_im_c.flatten()[:-1]))
distPrev_arr_c = pos_array_im_c - prevPos_arr_c
distPrev_arr_c[distPrev_arr_c==0] = 1
distPrev_list = np.append(distPrev_list,distPrev_arr_c.astype(int)).flatten()
prevPos_list = np.append(prevPos_list,prevPos_arr_c.astype(int)).flatten()
prevPos_arr_c = []
distPrev_arr_c = []
subs_type_processed = subs_type_processed.reindex(index_orig_df).reset_index(drop=True)
subs_type_processed['prevPos'] = prevPos_list
subs_type_processed['distPrev'] = distPrev_list
return subs_type_processed
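# Illustrative usage sketch (my addition, with hypothetical data): calcIntermutDist2 expects a
# table with 'chr' and 'position' columns; 'distPrev' then holds the distance to the previous
# breakpoint on the same chromosome and 'prevPos' its position.
def _example_calcIntermutDist2():
    demo = pd.DataFrame({'chr': ['1', '1', '1', '2', '2'],
                         'position': [100, 250, 1250, 40, 90]})
    return calcIntermutDist2(demo)[['chr', 'position', 'prevPos', 'distPrev']]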
def calcIntermutDist(subs_type,first_chrom_na=False):
subs_type_processed = | pd.DataFrame() | pandas.DataFrame |
from datetime import timedelta
import pandas as pd
import numpy as np
from exceptions import MatchCheckpointsException
def find_acute(df, i, segment, near, epsilon):
"""
    1. Calculate the distance from the checkpoint to all consecutive points in the data.
    2. Acute: for checkpoint "i", test whether the angle between the lines ck:A and A:B is acute. If so, ck lies "between" A and B.
    Epsilon is the fudge factor.
"""
point = (segment['location']['lat'], segment['location']['lon'])
df[f'ck_to_A{i}'] = np.linalg.norm(df[['Latitude', 'Longitude']].values - point, axis=1)
df[f'ck_to_B{i}'] = np.linalg.norm(df[['shift_Latitude', 'shift_Longitude']].values - point,
axis=1)
if df[f'ck_to_A{i}'].min() > near * 10:
raise MatchCheckpointsException(
f"It appears you never made it close to checkpoint {segment['segment_name']}")
df['acute'] = df[f'ck_to_A{i}'] ** 2 + df['dist_to_next'] ** 2 <= df[
f'ck_to_B{i}'] ** 2 + epsilon
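# Illustrative sketch (my addition; hypothetical GPS points, with degrees treated as planar
# coordinates exactly as in the helper above): run the acute-triangle test for one checkpoint
# and inspect the distance columns it adds.
def _example_find_acute():
    demo = pd.DataFrame({'Latitude': [39.7370, 39.7379, 39.7390],
                         'Longitude': [-105.5230, -105.5238, -105.5245]})
    demo['shift_Latitude'] = demo['Latitude'].shift(-1)
    demo['shift_Longitude'] = demo['Longitude'].shift(-1)
    demo['dist_to_next'] = np.linalg.norm(
        demo[['Latitude', 'Longitude']].values -
        demo[['shift_Latitude', 'shift_Longitude']].values, axis=1)
    checkpoint = {'segment_name': 'demo checkpoint',
                  'location': {'lat': 39.7379, 'lon': -105.5238}}
    find_acute(demo, 0, checkpoint, near=0.001, epsilon=1e-12)
    return demo[['ck_to_A0', 'ck_to_B0', 'acute']]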
def match_checkpoints(df, epsilon, near, segments):
"""
    Identify the activity point that represents the arrival at the checkpoint:
    find near points that form acute triangles.
"""
row_slice = 0
for i, seg in enumerate(segments):
try:
find_acute(df, i, seg, near, epsilon)
# assign segment number to first acute point near the point seg point)
df.loc[
df[row_slice:][(df[row_slice:][f'ck_to_A{i}'] <= near) &
(df[row_slice:].acute)].index[0], ['checkpoint', 'Segment_name']] = i, seg[
'Segment_name']
# This removes the points we have past.
row_slice = int(df[df.checkpoint == i].index[0])
# df['seg_duration'] = df[df.checkpoint >= 0]['Date_Time'].diff()
except Exception as e:
raise MatchCheckpointsException(
f"Fail on checkpoint:{i} location: {(seg['location']['lat'], seg['location']['lon'])}\nDataframe columns:\n{df.columns}")
def calculate_segment_times(df, segments):
"""
    This is for fixed-distance segments, competing for time.
    This selects only the rows matched to checkpoints, then calcs the diff in time.
"""
df['seg_duration'] = df[df.checkpoint >= 0]['Date_Time'].diff()
df['segment'] = df.checkpoint.fillna(method='ffill')
# Set everything at the end to nan
    df.loc[df.segment >= len(segments) - 1, 'segment'] = np.nan  # .loc avoids chained assignment
# TODO Add segment metrics
def calculate_segment_distance(df, segments):
"""
    This is for fixed-time (TicToc) segments, competing for distance.
[{
'segment_name': 'Event Start',
'location': {'lat': 39.737912, 'lon': -105.523881},
'type_name': 'transport',
'type_args': {'time_limit': 1800}
'duration': Timedelta('0 days 00:24:21'),
'datetime': Timestamp('2012-07-21 09:18:13'),
'distance': 25677
'total_timed': datetime.timedelta(0),
total_timed_types: {'uphill':Timedelta(123), 'gravel': Timedelta(321)}
},]
"""
results = []
for i, seg in enumerate(segments):
if seg['type_name'] == 'tictoc':
seg_start_time = df[df.checkpoint == i].Date_Time.values[0]
seg_end_time = seg_start_time + pd.Timedelta(minutes=seg['type_args']['tictoc'])
seg_past_end = df[df.Date_Time >= seg_end_time].iloc[0]
seg_before_end = df[df.Date_Time <= seg_end_time].iloc[-1]
a = seg_before_end.distance
b = seg_past_end.distance
c = seg_before_end.Date_Time
d = seg_past_end.Date_Time
p = seg_end_time
seg_finish = (b - a) * ((p - d) / (d - c)) + a
seg_distance = seg_finish - df[df.checkpoint == i].distance.iloc[0]
seg['distance'] = seg_distance
seg['duration'] = timedelta(minutes=seg['type_args']['tictoc'])
seg['datetime'] = | pd.to_datetime(seg_start_time) | pandas.to_datetime |
import io
import os
from datetime import datetime
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.contract.data_contract import KDATA_COLUMN_STOCK, KDATA_COLUMN_163
from fooltrader.contract.files_contract import get_kdata_path
from fooltrader.utils import utils
class FutureShfeSpider(scrapy.Spider):
name = "future_shfe_spider"
custom_settings = {
# 'DOWNLOAD_DELAY': 2,
# 'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
    # If a date range is specified, it is used to fetch incremental data
def yield_request(self, item, start_date=None, end_date=None):
data_path = get_kdata_path(item, source='163')
if start_date:
start = start_date.strftime('%Y%m%d')
else:
start = item['listDate'].replace('-', '')
if end_date:
end = end_date.strftime('%Y%m%d')
else:
end = datetime.today().strftime('%Y%m%d')
if not os.path.exists(data_path) or start_date or end_date:
if item['exchange'] == 'sh':
exchange_flag = 0
else:
exchange_flag = 1
url = self.get_k_data_url(exchange_flag, item['code'], start, end)
yield Request(url=url, meta={'path': data_path, 'item': item},
callback=self.download_day_k_data)
def start_requests(self):
item = self.settings.get("security_item")
start_date = self.settings.get("start_date")
end_date = self.settings.get("end_date")
if item is not None:
for request in self.yield_request(item, start_date, end_date):
yield request
else:
for _, item in get_security_list().iterrows():
for request in self.yield_request(item):
yield request
def download_day_k_data(self, response):
path = response.meta['path']
item = response.meta['item']
try:
            # CSV data that has already been saved
if os.path.exists(path):
df_current = | pd.read_csv(path, dtype=str) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Analyze an open dataset of the Dream Market webstore's "Cocaine" category
#
# Determine the factors that influence the price of cocaine, based on the dataset from https://www.kaggle.com/everling/cocaine-listings
# The dataset is approximately 1,400 cleaned and standardized product listings from Dream Market's "Cocaine" category. It was collected with web-scraping and text extraction techniques in July 2017.
#
# As bonus questions, we can also:
# - Compare the Australian market with the rest of the world
# - Determine if there is a volume discount and how significant it is
#
#
#
# # Table of contents
# The main parts of this notebook are:
# 1. [Load data into DataFrames](#load)
# 1. [Check the data](#check)
# 1. [Data Analysis](#analysis)
# 1. [Data visualisation to identify possible hypotheses](#raw_plots)
# 1. [Correlation matrix](#corr)
# 1. [Data distribution](#distribution)
# 1. [Summary and hypothesis](#hyp)
#
# 1. [Data Transformation and features engineering](#eng)
# 1. [Model and model evaluation](#model)
# 1. [Model Result](#model_result)
# 1. [Output Reports based on the model data](#summary)
# 1. [Report 1 Gram of Q75 by Country of shipment](#rep_1)
# 1. [Report 2 Cost per gram of Q90 by Grams Pack in Australia](#rep_2)
# 1. [Report 3 Cost for 1 gram by Quality in AU](#rep_3)
# 1. [Report 4. Factor plot](#rep_4)
#
# 1. [Next steps](#next)
#
# In[3]:
#imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, metrics
import statsmodels.api as sm
import seaborn as sns
print()
import warnings
warnings.simplefilter('ignore')
# <a name="load"></a>
# ## Upload Dreamworld Data set
# In[5]:
from subprocess import check_output
# Any results you write to the current directory are saved as output.
filename = check_output(["ls", "../../../input/everling_cocaine-listings"]).decode("utf8").strip()
df_raw = pd.read_csv("../../../input/everling_cocaine-listings/" + filename, thousands=",")
df_raw.head()
# <a name="analysis"></a>
# ### Data Exploration
# <a name="corr"></a>
# ### Correlation matrix
# Let's see how different variable correlated each other and see if we can make some initial conclusion out of the result
# In[44]:
#check correlation
df_corr = df_raw[['grams', 'quality', 'btc_price', 'cost_per_gram', 'cost_per_gram_pure', 'rating']].corr()
print (df_corr)
print()
# There is an expected *correlation* between:
# * cost per gram and cost per gram pure
# * btc price and grams
#
# However, it is interesting that there is no correlation between quality and cost per gram pure
#
# <a id="distribution"></a>
# Let's now dig into the categorical variables and see how the different ships_from countries impact the price of cocaine
# In[10]:
#visual on categorical variable ship from
sns.factorplot( x='ships_from', y='cost_per_gram', data=df_raw, kind='box', size = 3, aspect=5 )
# *Result: The price is more or less the same across all countries except Australia, which shows a noticeable difference in comparison to the other countries. China has the least expensive offers, but it seems that the number of offers is significantly lower*
# In[11]:
fg = sns.FacetGrid(df_raw[['quality', 'cost_per_gram', 'ships_from']], hue='ships_from',size=5, aspect=3)
fg.map(plt.scatter, 'quality', 'cost_per_gram').add_legend()
# *result: surprisingly, quality has no significant impact on the price of the product*
# <a name="hyp"></a>
# ### Results and hypothesis
# Observations:
# - The quality variable, despite its continuous nature, has very strong patterns and very limited options to select from. Because the variable is not normally distributed, I assume we can pack it into classes and analyse it as a categorical variable (a quick pd.cut sketch follows below). For the sake of simplicity I use all of the available quality options.
# - The grams variable also has a very strong packing pattern. Most likely, using it as a categorical variable will give better model quality
# - Quality has no correlation with the other variables, not even with price
# - The number of transactions in Europe is greater than in Australia or the USA, and significantly greater than in Asia. My assumption is that the most trustworthy results should be for Europe. Reasonable assumptions may apply to the other regions.
#
# Hypothesis:
#
# The factors that most influence the cocaine price are the shipment location and the transaction volume, based on the dataset of the Dream Market web shop.
#
# In[ ]:
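# A minimal sketch (my own illustration, not part of the original notebook): packing the
# quality variable into coarse classes before treating it as categorical. The bin edges are
# assumptions chosen only to show the idea.
quality_class = pd.cut(df_raw['quality'], bins=[0, 50, 70, 85, 100],
                       labels=['low', 'medium', 'high', 'very_high'])
quality_class.value_counts()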
# <a id="eng"></a>
# ## Data Transformation and Feature engineering
# In[14]:
#Plot data distributions
# Set up the matplotlib figure
f, axes = plt.subplots(2, 3, figsize=(10, 10), sharex=False, sharey=False)
sns.distplot(df_raw['cost_per_gram_pure'], ax=axes[0, 0])
sns.distplot(df_raw['cost_per_gram'], ax=axes[0, 1])
sns.distplot(df_raw['quality'], ax=axes[0, 2])
sns.distplot(df_raw['btc_price'], ax=axes[1,0])
sns.distplot(df_raw['grams'], ax=axes[1,1])
sns.distplot(df_raw['rating'], ax=axes[1,2])
# In[15]:
df_raw[['grams', 'quality', 'btc_price', 'cost_per_gram', 'cost_per_gram_pure', 'rating']].skew() # Check skewness
# *Result: the data is quite skewed. We will see how reasonably the model performs. We may need to do some extra work on it:*
# * dropping the first peak in *cost per gram*
# * checking if we can drop some variables from the model
#
# In[16]:
df_raw[['grams', 'quality', 'btc_price', 'cost_per_gram', 'cost_per_gram_pure', 'rating']].kurt() # Check kurtosis
# *Result: all variables have a significant positive kurtosis. Some further action may be required*
# In[18]:
#Attempt to fix kurtosis and get a closer-to-normal distribution by applying a log function
df_raw['btc_price_log'] = df_raw['btc_price'].apply(np.log)
# In[19]:
f, axes = plt.subplots(1, 2, figsize=(10, 7), sharex=False, sharey=False)
sns.distplot(df_raw['btc_price'], ax=axes[0])
sns.distplot(df_raw['btc_price_log'], ax=axes[1])
# In[20]:
df_raw[['btc_price', 'btc_price_log']].skew() # Check skewness
# *Result: the log transform improves the distribution quite significantly. btc_price_log will be used as the outcome variable*
# ### Add dummy Variables
# In[ ]:
# In[21]:
print ("grams unique categories")
print (df_raw['grams'].unique())
# In[22]:
#Add dummy categories for grams
df_raw['grams'].unique()
dummy_rank = | pd.get_dummies(df_raw['grams'], prefix='grams', prefix_sep='_') | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import pandas as pd
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of any randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of samples x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_samples, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.unique(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mapper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mapper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_samples for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{pd.concat([ | pd.DataFrame(document,columns=['X_message_j',]) | pandas.DataFrame |
import os
import tarfile
import urllib.request
os.chdir('/Users/xxx/PycharmProjects/handson-ml2/datasets')
cwd = os.getcwd()
#downloading dataset
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join(cwd, "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
os.makedirs(housing_path, exist_ok=True)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return | pd.read_csv(csv_path) | pandas.read_csv |
import time
import argparse
import datetime
import numpy as np
import pandas as pd
import torch
import itertools
import yaml
from src.utils.preprocess_utils import get_datasets, get_dataloaders
from src.utils.utils import GRIDSEARCH_CSV
from src.training.main import main
def get_gridsearch_config(config_path):
with open(config_path, "r") as ymlfile:
config = yaml.load(ymlfile, Loader=yaml.FullLoader)
hyperparameters = config['hyperparameters']
print('hyperparameters keys', list(hyperparameters.keys()))
all_config_list = []
for param_name in hyperparameters.keys():
all_config_list.append(hyperparameters[param_name])
return all_config_list
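# Illustrative sketch (my addition): a tiny helper that reports how many runs a gridsearch
# config will launch -- every hyperparameter in the YAML maps to a list of candidate values,
# and itertools.product over those lists yields one training run per combination.
def count_gridsearch_runs(config_path):
    all_config_list = get_gridsearch_config(config_path)
    return int(np.prod([len(values) for values in all_config_list]))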
def gridsearch(config_path, training_data, testset_data, test_labels_data, do_save, device):
all_config_list = get_gridsearch_config(config_path)
training_remaining = np.prod([len(config) for config in all_config_list])
print('Training to do:', training_remaining)
# Save gridsearch training to csv
current_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
csv_path = GRIDSEARCH_CSV+'results_{}.csv'.format(current_time)
results_dict = {'model_type': [],
'optimizer_type': [],
'loss_criterion': [],
'lr': [],
'epochs': [],
'batch_size': [],
'patience_es': [],
'scheduler_type': [],
'patience_lr': [],
'save_condition': [],
'fix_length': [],
'best_epoch': [],
'train_loss': [],
'val_loss': [],
'train_acc': [],
'val_acc': [],
'test_acc': [],
'end_time': []}
# Start gridsearch
prev_model_type = None
start_time = time.time()
for params in itertools.product(*all_config_list):
# /!\ Has to be in the same order as in the config.yaml file /!\ #
model_type, optimizer_type, \
loss_criterion, lr, epochs, \
batch_size, patience_es, \
scheduler_type, patience_lr, \
save_condition, fix_length, context_size, pyramid, fcs, batch_norm, alpha = params
if prev_model_type != model_type:
print("prev_model_type", prev_model_type)
print("model_type", model_type)
print("Changing tokenizer...")
ENGLISH, tokenizer, train_data, val_data, test_data = get_datasets(training_data,
testset_data, test_labels_data,
model_type, fix_length)
prev_model_type = model_type
print('fix_length:', fix_length)
print('batch_size:', batch_size)
if model_type == 'PyramidCNN':
print('context_size:', context_size)
print('pyramid:', pyramid)
print('fcs:', fcs)
print('batch_norm:', batch_norm)
print('alpha:', alpha)
dataloaders = get_dataloaders(train_data, val_data, test_data, batch_size, device)
history_training = main(dataloaders, ENGLISH, model_type, optimizer_type,
loss_criterion, lr, batch_size, epochs, patience_es,
do_save, device,
do_print=False, training_remaining=training_remaining,
scheduler_type=scheduler_type, patience_lr=patience_lr,
save_condition=save_condition, fix_length=fix_length,
context_size=context_size, pyramid=pyramid, fcs=fcs,
batch_norm=batch_norm, alpha=alpha)
# Save training results to csv
best_epoch = history_training['best_epoch']
for key in results_dict.keys():
if key in ['train_loss', 'val_loss', 'train_acc', 'val_acc']:
results_dict[key].append(history_training[key][best_epoch])
elif key == 'epochs':
results_dict[key].append(epochs)
elif key == 'batch_size':
results_dict[key].append(batch_size)
else:
results_dict[key].append(history_training[key])
results_csv = | pd.DataFrame(data=results_dict) | pandas.DataFrame |
import requests
from model.parsers import model as m
import pandas as pd
import datetime
dataset = m.initialize()
unique_dates = list()
raw_data = requests.get('https://api.covid19india.org/states_daily.json')
raw_json = raw_data.json()
for item in raw_json['states_daily']:
if item['date'] not in unique_dates:
unique_dates.append(item['date'])
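# reshape the flat states_daily records into per-state dicts keyed by date and status
# (Confirmed / Recovered / Deceased)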
for date in unique_dates:
for item in raw_json['states_daily']:
if date == item['date']:
for state in dataset:
if date not in state:
state[date] = dict()
state[date][item['status']] = item[state['code']]
def date_validate(date_text):
try:
datetime.datetime.strptime(date_text, '%d-%b-%y')
except ValueError:
print("Incorrect date format, should be dd-Mmm-yy")
return 0
def state_code_validate(state_code):
unique_states = list()
for item in dataset:
unique_states.append(item['code'])
if state_code in unique_states:
return 1
else:
print('Please enter a valid state code')
return 0
def needs_patch(date_to_fetch, state_code):
if (date_to_fetch == '26-Mar-20' and state_code == 'ap') or (date_to_fetch == '16-Mar-20' and state_code == 'mp'):
return True
return False
def apply_patch(date_to_fetch, state_code):
if date_to_fetch == '26-Mar-20' and state_code == 'ap':
return {'Confirmed': '1', 'Recovered': '0', 'Deceased': '0'}
if date_to_fetch == '16-Mar-20' and state_code == 'mp':
return {'Confirmed': '0', 'Recovered': '0', 'Deceased': '0'}
def fetch_by_date_and_code(date_to_fetch, state_code):
if(needs_patch(date_to_fetch, state_code)):
return apply_patch(date_to_fetch, state_code)
if date_to_fetch in unique_dates:
for state in dataset:
if state['code'] == state_code:
if date_to_fetch in state:
return state[date_to_fetch]
else :
print('date does not exist')
def cumulative_datewise_data(date_to_fetch, state_code):
should_stop = False
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
print(unique_date, fetch_by_date_and_code(unique_date, state_code))
if should_stop:
break
def cumulative_data(date_to_fetch, state_code):
should_stop = False
cumulative_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
if should_stop:
break
return cumulative_dict
else:
return 0
def cumulative_series_datewise_data(date_to_fetch, state_code):
should_stop = False
cumulative_series_datewise_dict = dict()
if date_to_fetch in unique_dates:
for unique_date in unique_dates:
if unique_date == date_to_fetch:
should_stop = True
cumulative_series_datewise_dict[unique_date] = cumulative_data(unique_date, state_code)
if should_stop:
break
return cumulative_series_datewise_dict
else:
print('date does not exist')
def cumulative_last_3_days(state_code, should_print = False):
resultset = dict()
for unique_date in unique_dates[-3:]:
resultset[unique_date] = cumulative_data(unique_date, state_code)
if should_print:
print(unique_date, cumulative_data(unique_date, state_code))
return resultset
def cumulative_last_3_days_all_states(choice):
resultset = dict()
for state in dataset:
resultset[state['name']] = cumulative_last_3_days(state['code'], False)
return resultset
def total_count(state_code):
cumulative_dict = dict()
for unique_date in unique_dates:
returned_dict = fetch_by_date_and_code(unique_date, state_code)
for key in returned_dict:
if key in cumulative_dict:
cumulative_dict[key] += int(returned_dict[key])
else:
cumulative_dict[key] = int(returned_dict[key])
return cumulative_dict
def make_data_frame():
unique_states = list()
confirmed_list = list()
recovery_list = list()
deceased_list = list()
for state in dataset[:-1]:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset[:-1]:
status = total_count(state['code'])
confirmed_list.append(status['Confirmed'])
recovery_list.append(status['Recovered'])
deceased_list.append(status['Deceased'])
data = {'STATE/UT':unique_states, 'Confirmed':confirmed_list, 'Recovered':recovery_list, 'Deceased':deceased_list}
df = pd.DataFrame(data, columns = ['STATE/UT', 'Confirmed', 'Recovered', 'Deceased'])
return df
def cumulative_last_3_days_confirmed_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Confirmed'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_recovered_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Recovered'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def cumulative_last_3_days_deceased_dataframe(choice):
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
resultset = cumulative_last_3_days_all_states(choice)
for state in resultset:
for date in resultset[state]:
if date not in date_dict:
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in resultset:
for date in resultset[state]:
dates[date].append(resultset[state][date]['Deceased'])
data = {'STATE/UT':unique_states, unique_dates[0]:dates[unique_dates[0]], unique_dates[1]:dates[unique_dates[1]], unique_dates[2]:dates[unique_dates[2]]}
df = pd.DataFrame(data)
return df
def all_data_confirmed():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Confirmed'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_recovered():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Recovered'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = pd.DataFrame(data)
return df
def all_data_deceased():
unique_states = list()
dates = dict()
unique_dates = list()
date_dict = dict()
for state in dataset:
if state['name'] not in unique_states:
unique_states.append(state['name'])
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
date_dict[date] = list()
dates[date] = date_dict[date]
unique_dates.append(date)
for state in dataset:
for date in state:
if date != 'code' and date != 'name':
dates[date].append(state[date]['Deceased'])
data = {'STATE/UT':unique_states}
for date in dates:
data[date] = dates[date]
df = | pd.DataFrame(data) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from rayml.pipelines.components import PerColumnImputer
from rayml.utils.woodwork_utils import infer_feature_types
@pytest.fixture
def non_numeric_df():
X = pd.DataFrame(
[
["a", "a", "a", "a"],
["b", "b", "b", "b"],
["a", "a", "a", "a"],
[np.nan, np.nan, np.nan, np.nan],
]
)
X.columns = ["A", "B", "C", "D"]
return X
def test_invalid_parameters():
with pytest.raises(ValueError):
strategies = ("impute_strategy", "mean")
PerColumnImputer(impute_strategies=strategies)
with pytest.raises(ValueError):
strategies = ["mean"]
PerColumnImputer(impute_strategies=strategies)
def test_all_strategies():
X = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, np.nan]),
"B": pd.Series([4, 6, 4, np.nan]),
"C": pd.Series([6, 8, 8, np.nan]),
"D": pd.Series(["a", "a", "b", np.nan]),
}
)
X.ww.init(logical_types={"D": "categorical"})
X_expected = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, 4]),
"B": pd.Series([4, 6, 4, 4]),
"C": | pd.Series([6, 8, 8, 100]) | pandas.Series |
from .base_analysis import *
from scipy.stats import linregress
import scipy.stats
from scipy.sparse.linalg import svds
from math import ceil
import matplotlib.pyplot as pp
from scipy import linspace, sin
from scipy.interpolate import splrep, splev
import numpy
import numpy.random as npr
import json
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
import pandas as pd
from sbaas.resources.cookb_signalsmooth import smooth
from sbaas.resources.legendre_smooth import legendre_smooth
from Bio.Statistics import lowess
class base_calculate():
def __init__(self):
self.data=[];
# calculations
# biomass normalization
def calculate_gdw_CVSAndCVSUnitsAndODAndConversionAndConversionUnits(self,cvs_I,cvs_units_I,od600_I,conversion_I,conversion_units_I):
# check units
if (cvs_units_I == 'mL' and conversion_units_I == 'gDW*L-1*OD600-1'):
gdw_O = cvs_I*1e-3*od600_I*conversion_I;
gdw_units_O = 'gDW';
return gdw_O, gdw_units_O;
else:
print('biomass conversion units do not match!')
exit(-1);
def calculate_cellVolume_CVSAndCVSUnitsAndODAndConversionAndConversionUnits(self,cvs_I,cvs_units_I,od600_I,conversion_I,conversion_units_I):
# check units
if (cvs_units_I == 'mL' and conversion_units_I == 'uL*mL-1*OD600-1'):
cellVolume_O = cvs_I*od600_I*conversion_I*1e-6;
cellVolume_units_O = 'L';
return cellVolume_O, cellVolume_units_O;
else:
print('cell volume conversion units do not match!')
exit(-1);
def calculate_conc_concAndConcUnitsAndDilAndDilUnitsAndConversionAndConversionUnits(self,conc_I,conc_units_I,dil_I,dil_units_I,conversion_I,conversion_units_I):
# check units
if (conc_units_I == 'uM' and conversion_units_I == 'L' and dil_units_I == 'mL'):
conc_O = (conc_I*1e-6)*(dil_I)*(1/conversion_I);
conc_units_O = 'mM';
return conc_O, conc_units_O;
elif (conc_units_I == 'uM' and conversion_units_I == 'gDW' and dil_units_I == 'mL'):
conc_O = (conc_I*1e-3)*(dil_I)*(1/conversion_I);
conc_units_O = 'umol*gDW-1';
return conc_O, conc_units_O;
elif ((conc_units_I == 'height_ratio' or conc_units_I == 'area_ratio') and (conversion_units_I == 'L' or conversion_units_I == 'gDW') and dil_units_I == 'mL'):
conc_O = (conc_I*1e-3)*(dil_I)*(1/conversion_I);
conc_units_O = conc_units_I;
return conc_O, conc_units_O;
else:
print('concentration normalization units do not match!')
exit(-1);
def calculate_cultureDensity_ODAndConversionAndConversionUnits(self,od600_I,conversion_I,conversion_units_I):
cultureDensity_O = od600_I*conversion_I;
cultureDensity_units_O = conversion_units_I.replace('*OD600-1','');
return cultureDensity_O, cultureDensity_units_O;
def calculate_biomass_CVSAndCVSUnitsAndODAndConversionAndConversionUnits(self,cvs_I,cvs_units_I,od600_I,conversion_I,conversion_units_I):
# check units
if (cvs_units_I == 'mL' and conversion_units_I == 'gDW*L-1*OD600-1'):
# return the biomass in gDW
gdw_O = cvs_I*1e-3*od600_I*conversion_I;
gdw_units_O = 'gDW';
return gdw_O, gdw_units_O;
elif (cvs_units_I == 'mL' and conversion_units_I == 'uL*mL-1*OD600-1'):
# return the cell volume in L
cellVolume_O = cvs_I*od600_I*conversion_I*1e-6;
cellVolume_units_O = 'L';
return cellVolume_O, cellVolume_units_O;
else:
print('biomass conversion units do not match!')
exit(-1);
# statistical analysis
# calculate the geometric mean and variance:
def calculate_ave_var_geometric(self,data_I):
# calculate the geometric average and var of data
# with 95% confidence intervals
try:
data_ave_O = 0.0
# calculate the average of the sample
for c in data_I:
data_ave_O += log(c);
data_ave_O = exp(data_ave_O/len(data_I));
data_var_O = 0.0
#calculate the variance of the sample
for c in data_I:
data_var_O += pow(log(c/data_ave_O),2);
data_var_O = data_var_O/(len(data_I)-1); #note: we will need to take the exp()
# to get the final variance
# but leaving it this way makes the
# downstream calculations simpler
#calculate the 95% confidence intervals
data_se = sqrt(data_var_O/len(data_I));
data_lb_O = exp(log(data_ave_O) - 1.96*data_se);
data_ub_O = exp(log(data_ave_O) + 1.96*data_se);
#correct the variance for use in reporting
data_var_O = exp(data_var_O);
return data_ave_O, data_var_O, data_lb_O, data_ub_O;
except Exception as e:
print(e);
exit(-1);
# calculate the mean and variance:
def calculate_ave_var(self,data_I,confidence_I = 0.95):
# calculate the average and var of data
# with 95% confidence intervals
try:
data = numpy.array(data_I);
data_ave_O = 0.0
# calculate the average of the sample
data_ave_O = numpy.mean(data);
data_var_O = 0.0
#calculate the variance of the sample
data_var_O = numpy.var(data);
#calculate the standard error of the sample
se = scipy.stats.sem(data)
#calculate the 95% confidence intervals
n = len(data);
h = se * scipy.stats.t._ppf((1+confidence_I)/2., n-1)
data_lb_O = data_ave_O - h;
data_ub_O = data_ave_O + h;
return data_ave_O, data_var_O, data_lb_O, data_ub_O;
except Exception as e:
print(e);
exit(-1);
# calculate the confidence intervals
def calculate_ciFromPoints(self,data_I, alpha=0.05):
"""Calculate the confidence intervals from sampled points"""
data_sorted = numpy.sort(data_I)
n = len(data_sorted)
lb = data_sorted[int((alpha/2.0)*n)]
ub = data_sorted[int((1-alpha/2.0)*n)]
return lb,ub
def bootstrap(self,data, num_samples=100000, statistic=numpy.mean, alpha=0.05):
"""Returns bootstrap estimate of 100.0*(1-alpha) CI for statistic."""
n = len(data)
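        # each row of idx is one bootstrap resample: n indices drawn with replacement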
idx = npr.randint(0, n, (num_samples, n))
samples = data[idx]
stat = numpy.sort(statistic(samples, 1))
return (stat[int((alpha/2.0)*num_samples)],
stat[int((1-alpha/2.0)*num_samples)])
# calculate the p-value difference
def permutation_resampling(self,case, control, num_samples=50, statistic=numpy.mean):
'''calculate the pvalue of two data sets using a resampling approach'''
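        # the observed |difference in means| is compared against differences obtained after
        # randomly permuting the pooled samples; the p-value is the fraction at least as extreme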
observed_diff = abs(statistic(case) - statistic(control))
num_case = len(case)
combined = numpy.concatenate([case, control])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = numpy.mean(xs[:num_case]) - numpy.mean(xs[num_case:])
diffs.append(diff)
pval = (numpy.sum(diffs > observed_diff) +
numpy.sum(diffs < -observed_diff))/float(num_samples)
return pval, observed_diff, diffs
def calculate_pvalue_permutation(self,data_1_I,data_2_I,n_permutations_I=10,n_resamples_I=10):
        '''calculate the p-value of two data sets by determining
        the lack of overlap between sample points using a permutation test.
        If the numbers of sample points in the two data sets are not equal,
        a subset of samples of matching length is resampled from the larger data set'''
data_1 = None; #sample set with fewer points
data_2 = None; #sample set with more points
n_resamples = 0;
# check the length of data_1 and data_2
if len(data_1_I)>len(data_2_I):
data_1=numpy.array(data_2_I);
data_2=numpy.array(data_1_I);
n_resamples=n_resamples_I;
elif len(data_1_I)<len(data_2_I):
data_1=numpy.array(data_1_I);
data_2=numpy.array(data_2_I);
n_resamples=n_resamples_I;
else:
data_1=numpy.array(data_1_I);
data_2=numpy.array(data_2_I);
n_samples_min = len(data_1);
vals = []
for i in range(0,n_permutations_I):
if n_resamples==0:
cond1 = numpy.random.permutation(data_1)
cond2 = numpy.random.permutation(data_2)
z = cond1 - cond2
x = len(z[z>0]) + 1
y = len(z[z<0]) + 1
k = min(x,y)
vals.append(k)
else:
cond1 = numpy.random.permutation(data_1)
cond2 = numpy.random.permutation(data_2)
for resample in range(n_resamples):
cond2_int = numpy.random.randint(0,n_samples_min);
z = cond1 - cond2[cond2_int]
x = len(z[z>0]) + 1
y = len(z[z<0]) + 1
k = min(x,y)
vals.append(k)
p = numpy.mean(vals)/len(data_1)*2
return p;
# calculate the interquartiles
def calculate_interquartiles(self,data_I,iq_range_I = [25,75]):
'''compute the interquartiles and return the min, max, median, iq1 and iq3'''
min_O = numpy.min(data_I);
max_O = numpy.max(data_I);
iq_1_O, iq_2_O = numpy.percentile(data_I, iq_range_I)
median_O = numpy.median(data_I);
return min_O, max_O, median_O, iq_1_O, iq_2_O;
# linear regression
def calculate_regressionParameters(self,concentrations_I,ratios_I,dilution_factors_I,fit_I,weighting_I,use_area_I):
'''calculate regression parameters for a given component
NOTE: intended to be used in a loop'''
# input:
# concentrations_I
# ratios_I
# dilution_factors_I
# fit_I
# weighting_I
# use_area_I
# ouput:
# slope
# intercept
# correlation
# lloq
# uloq
# points
        # note: need to make a complementary method to query concentrations, ratios, and dilution factors
# for each component prior to calling this function
#TODO
return
def calculate_growthRate(self,time_I,biomass_I):
'''calculate exponential growth'''
x = numpy.array(time_I);
        y = numpy.log(biomass_I); #weight the biomass by the natural logarithm
slope, intercept, r_value, p_value, std_err = linregress(x,y);
r2 = r_value**2; #coefficient of determination
return slope, intercept, r2, p_value, std_err;
def interpolate_biomass(self,time_I, slope, intercept):
'''interpolate the biomass from an exponential fit of the growth rate'''
biomass = [];
for t in time_I:
biomass.append(t*slope+intercept);
return biomass;
def calculate_uptakeAndSecretionRate(self,dcw_I,conc_I,gr_I):
'''calculate uptake and secretion rates'''
x = numpy.array(dcw_I);
y = numpy.array(conc_I);
slope, intercept, r_value, p_value, std_err = linregress(x,y);
r2 = r_value**2; #coefficient of determination
rate = slope*gr_I;
return slope, intercept, r2, p_value, std_err, rate;
# smoothing functions
def fit_trajectories(self,x_I,y_I,fit_func_I='lowess',plot_textLabels_I=None,plot_fit_I=False):
'''fit trajectory growth rate data to a smoothing function'''
#Input:
# x_I = ale_time
# y_I = growth_rate
#Output:
# x_O = ale_time_fitted
# y_O = growth_rate_fitted
#cnt = 1;
x = [];
y = [];
x = x_I;
y = y_I;
if fit_func_I=='spline':
#spline
tck = splrep(x,y,k=3,s=.025) #no smoothing factor
#tck = splrep(x,y,k=3,task=-1,t=10) #no smoothing factor
x2 = linspace(min(x),max(x),500)
y2_spline= splev(x2,tck)
y2 = numpy.zeros_like(y2_spline);
for i,y2s in enumerate(y2_spline):
if i==0:
y2[i] = y2s;
elif i!=0 and y2s<y2[i-1]:
y2[i] = y2[i-1];
else:
y2[i] = y2s;
elif fit_func_I=='movingWindow':
#moving window filter
x2 = numpy.array(x);
y2 = smooth(numpy.array(y),window_len=10, window='hanning');
elif fit_func_I=='legendre':
#legendre smoothing optimization
smooth = legendre_smooth(len(x),1,1e-4,25)
x2 = numpy.array(x);
y2 = smooth.fit(numpy.array(y))
elif fit_func_I=='lowess':
#lowess
x2 = numpy.array(x);
y2_lowess = lowess.lowess(x2,numpy.array(y),f=0.1,iter=100)
y2 = numpy.zeros_like(y2_lowess);
for i,y2s in enumerate(y2_lowess):
if i==0:
y2[i] = y2s;
elif i!=0 and y2s<y2[i-1]:
y2[i] = y2[i-1];
else:
y2[i] = y2s;
else:
            print("fit function not recognized");
if plot_fit_I:
##QC plot using MatPlotLib
# Create a Figure object.
fig = pp.figure();
# Create an Axes object.
ax = fig.add_subplot(1,1,1) # one row, one column, first plot
## Add a title.
#ax.set_title(k['sample_label'])
# Set the axis
pp.axis([0,max(x),0,max(y)+0.1]);
# Add axis labels.
ax.set_xlabel('Time [days]')
ax.set_ylabel('GR [hr-1]')
## Label data points
#tck = splrep(x,y,k=3,s=1.); #spline fit with very high smoothing factor
#x_days = ALEsKOs_textLabels[k['sample_name_abbreviation']]['day']
#y_days = splev(x_days,tck)
#for i,txt in enumerate(ALEsKOs_textLabels[k['sample_name_abbreviation']]['dataType']):
# ax.annotate(txt, (x_days[i],y_days[i]-.15))
# Create the plot
#pp.plot(x_days,y_days,'rx',x,y,'b.',x2,y2,'g')
pp.plot(x,y,'b.',x2,y2,'g')
#display the plot
pp.show()
#record
x_O = [];
y_O = [];
x_O = x2;
y_O = y2;
#cnt += 1;
return x_O, y_O;
# other
def null(self, A, eps=1e-6):
u, s, vh = numpy.linalg.svd(A,full_matrices=1,compute_uv=1)
null_rows = [];
rank = numpy.linalg.matrix_rank(A)
for i in range(A.shape[1]):
if i<rank:
null_rows.append(False);
else:
null_rows.append(True);
null_space = scipy.compress(null_rows, vh, axis=0)
return null_space.T
# heatmap
def heatmap(self,data_I,row_labels_I,column_labels_I,
row_pdist_metric_I='euclidean',row_linkage_method_I='complete',
col_pdist_metric_I='euclidean',col_linkage_method_I='complete'):
'''Generate a heatmap using pandas and scipy'''
"""dendrogram documentation:
linkage Methods:
single(y) Performs single/min/nearest linkage on the condensed distance matrix y
complete(y) Performs complete/max/farthest point linkage on a condensed distance matrix
average(y) Performs average/UPGMA linkage on a condensed distance matrix
weighted(y) Performs weighted/WPGMA linkage on the condensed distance matrix.
centroid(y) Performs centroid/UPGMC linkage.
median(y) Performs median/WPGMC linkage.
ward(y) Performs Ward's linkage on a condensed or redundant distance matrix.
Output:
        'color_list': A list of color names. The k-th element represents the color of the k-th link.
        'icoord' and 'dcoord': Each of them is a list of lists. Let icoord = [I1, I2, ..., Ip] where Ik = [xk1, xk2, xk3, xk4] and dcoord = [D1, D2, ..., Dp] where Dk = [yk1, yk2, yk3, yk4], then the k-th link painted is (xk1, yk1) - (xk2, yk2) - (xk3, yk3) - (xk4, yk4).
'ivl': A list of labels corresponding to the leaf nodes.
'leaves': For each i, H[i] == j, cluster node j appears in position i in the left-to-right traversal of the leaves, where \(j < 2n-1\) and \(i < n\). If j is less than n, the i-th leaf node corresponds to an original observation. Otherwise, it corresponds to a non-singleton cluster."""
#parse input into col_labels and row_labels
#TODO: pandas is not needed for this.
mets_data = | pd.DataFrame(data=data_I, index=row_labels_I, columns=column_labels_I) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.rcParams['figure.dpi'] = 160
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import multiprocessing
from singlecellmultiomics.bamProcessing.bamBinCounts import generate_commands, count_methylation_binned
import argparse
from colorama import Fore, Style
from singlecellmultiomics.utils import dataframe_to_wig
from singlecellmultiomics.methylation import MethylationCountMatrix
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_from_pysam_alignmentFile
from colorama import Fore,Style
from collections import defaultdict, Counter
from multiprocessing import Pool
from datetime import datetime
import pysam
from singlecellmultiomics.bamProcessing import get_contig_sizes, get_contig_size
from singlecellmultiomics.bamProcessing.bamBinCounts import generate_commands, read_counts
def sample_dict():
return defaultdict(Counter)
def methylation_to_cut_histogram(args):
(alignments_path, bin_size, max_fragment_size, \
contig, start, end, \
min_mq, alt_spans, key_tags, dedup, kwargs) = args
distance_methylation = defaultdict(sample_dict) # sample - > distance -> context(ZzHhXx) : obs
max_dist = 1000
# Define which reads we want to count:
known = set()
if 'known' in kwargs and kwargs['known'] is not None:
# Only ban the very specific TAPS conversions:
try:
with pysam.VariantFile(kwargs['known']) as variants:
for record in variants.fetch(contig, start, end):
if record.ref=='C' and 'T' in record.alts:
known.add( record.pos)
if record.ref=='G' and 'A' in record.alts:
known.add(record.pos)
except ValueError:
            # This happens on contigs not present in the vcf
pass
p = 0
start_time = datetime.now()
with pysam.AlignmentFile(alignments_path, threads=4) as alignments:
# Obtain size of selected contig:
contig_size = get_contig_size(alignments, contig)
if contig_size is None:
raise ValueError('Unknown contig')
# Determine where we start looking for fragments:
f_start = max(0, start - max_fragment_size)
f_end = min(end + max_fragment_size, contig_size)
for p, read in enumerate(alignments.fetch(contig=contig, start=f_start,
stop=f_end)):
if p%50==0 and 'maxtime' in kwargs and kwargs['maxtime'] is not None:
if (datetime.now() - start_time).total_seconds() > kwargs['maxtime']:
print(f'Gave up on {contig}:{start}-{end}')
break
if not read_counts(read, min_mq=min_mq, dedup=dedup):
continue
tags = dict(read.tags)
for i, (qpos, methylation_pos) in enumerate(read.get_aligned_pairs(matches_only=True)):
# Don't count sites outside the selected bounds
if methylation_pos < start or methylation_pos >= end:
continue
call = tags['XM'][i]
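                    # the XM tag holds per-base methylation calls; a Bismark-style convention is
                    # assumed here: upper case = methylated, lower case = unmethylated
                    # (Z/z CpG, X/x CHG, H/h CHH, '.' = no cytosine context)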
if call=='.':
continue
sample = read.get_tag('SM')
distance = abs(read.get_tag('DS') - methylation_pos)
if distance>max_dist:
continue
distance_methylation[sample][(read.is_read1, read.is_reverse, distance)][call] +=1
return distance_methylation
threads = None
def get_distance_methylation(bam_path,
bp_per_job: int,
min_mapping_qual: int = None,
skip_contigs: set = None,
known_variants: str = None,
maxtime: int = None,
head: int=None,
threads: int = None,
**kwargs
):
all_kwargs = {'known': known_variants,
'maxtime': maxtime,
'threads':threads
}
all_kwargs.update(kwargs)
commands = generate_commands(
alignments_path=bam_path,
key_tags=None,
max_fragment_size=0,
dedup=True,
head=head,
bin_size=bp_per_job,
bins_per_job= 1, min_mq=min_mapping_qual,
kwargs=all_kwargs,
skip_contigs=skip_contigs
)
distance_methylation = defaultdict(sample_dict) # sample - > distance -> context(ZzHhXx) : obs
with Pool(threads) as workers:
for result in workers.imap_unordered(methylation_to_cut_histogram, commands):
for sample, data_for_sample in result.items():
for distance, context_obs in data_for_sample.items():
distance_methylation[sample][distance] += context_obs
return distance_methylation
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Extract methylation levels relative to cut site (DS tag) from bam file""")
argparser.add_argument('bamfile', metavar='bamfile', type=str)
argparser.add_argument('-bp_per_job', default=5_000_000, type=int, help='Amount of basepairs to be processed per thread per chunk')
argparser.add_argument('-threads', default=None, type=int, help='Amount of threads to use for counting, None to use the amount of available threads')
fi = argparser.add_argument_group("Filters")
fi.add_argument('-min_mapping_qual', default=40, type=int)
fi.add_argument('-head', default=None, type=int,help='Process the first n bins')
fi.add_argument('-skip_contigs', type=str, help='Comma separated contigs to skip', default='MT,chrM')
fi.add_argument('-known_variants',
help='VCF file with known variants, will be not taken into account as methylated/unmethylated',
type=str)
og = argparser.add_argument_group("Output")
og.add_argument('-prefix', default='distance_calls', type=str, help='Prefix for output files')
args = argparser.parse_args()
print('Obtaining counts ', end="")
r = get_distance_methylation(bam_path = args.bamfile,
bp_per_job = args.bp_per_job,
known_variants = args.known_variants,
skip_contigs = args.skip_contigs.split(','),
min_mapping_qual=args.min_mapping_qual,
head = args.head,
threads=args.threads,
)
print(f" [ {Fore.GREEN}OK{Style.RESET_ALL} ] ")
for ctx in 'zhx':
beta = {}
met = {}
un = {}
for sample, sample_data in r.items():
beta[sample] = {}
met[sample] = {}
un[sample] = {}
for distance, contexts in sample_data.items():
if ctx in contexts or ctx.upper() in contexts:
beta[sample][distance] = contexts[ctx.upper()]/(contexts[ctx.upper()]+contexts[ctx])
met[sample][distance] = contexts[ctx.upper()]
un[sample][distance] = contexts[ctx]
pd.DataFrame(beta).sort_index().T.sort_index().to_csv(f'{args.prefix}_beta_{ctx}.csv')
| pd.DataFrame(beta) | pandas.DataFrame |
def three_way_factorial_ANOVA(df_lists):
    # get the number of repetitions and the lengths of factors 1, 2 and 3
df_lists_len = len(df_lists)
f1_len = len(df_lists[0][0].columns)
f2_len = len(df_lists[0][0].index)
f3_len = len(df_lists[0])
    # compute the effect of each factor
f1_mean = sum([df[i].sum(axis=1) for df in df_lists for i in range(df_lists_len)]) / (f1_len*f3_len*df_lists_len)
f2_mean = sum([df[i].sum() for df in df_lists for i in range(df_lists_len)]) / (f2_len*f3_len*df_lists_len)
f3_mean = sum([pd.Series([df[i].mean().mean() for i in range(df_lists_len)]) for df in df_lists]) / f3_len
f_mean = sum([df[i].sum().sum() for df in df_lists for i in range(df_lists_len)]) / (f1_len*f2_len*f3_len*df_lists_len)
f1_effect, f2_effect, f3_effect = f1_mean - f_mean, f2_mean - f_mean, f3_mean - f_mean
    # compute the factor variations S1, S2, S3
S1 = ((f1_effect**2) * (f1_len*f3_len*df_lists_len)).sum()
S2 = ((f2_effect**2) * (f2_len*f3_len*df_lists_len)).sum()
S3 = ((f3_effect**2) * (f1_len*f2_len*df_lists_len)).sum()
    # build a table averaged over all repetitions
df_ave = [0 for i in range(f3_len)]
for i in range(f3_len):
for j in range(df_lists_len):
df_ave[i] += df_lists[j][i]
df_ave[i] /= df_lists_len
    # compute the variation S12 due to the interaction of factors 1 and 2
df_12 = (sum(df_ave) / f3_len) - f_mean
S1_2 = (df_12**2).sum().sum() * (f3_len*df_lists_len)
S12 = S1_2 - S1 - S2
    # compute the variation S13 due to the interaction of factors 1 and 3
df_13 = pd.DataFrame([df.mean(axis=1) for df in df_ave]) - f_mean
S1_3 = (df_13**2).sum().sum() * (f1_len*df_lists_len)
S13 = S1_3 - S1 - S3
    # compute the variation S23 due to the interaction of factors 2 and 3
df_23 = pd.DataFrame([df.mean() for df in df_ave]) - f_mean
S2_3 = (df_23**2).sum().sum() * (f2_len*df_lists_len)
S23 = S2_3 - S2 - S3
    # compute the variation S123 due to the interaction of factors 1, 2 and 3
df_123 = df_ave - f_mean
S1_2_3 = (df_123**2).sum().sum() * df_lists_len
S123 = S1_2_3 - S1 - S2 - S3 - S12 - S13 - S23
    # compute the error variation Se
St = sum([((df_lists[i][j]-f_mean)**2).sum().sum() for i in range(df_lists_len) for j in range(f3_len)])
Se = St - S1 - S2 - S3 - S12 - S13 - S23 - S123
    # compute the degrees of freedom df
df1 = f2_len - 1
df2 = f1_len - 1
df3 = f3_len - 1
df12 = df1 * df2
df13 = df1 * df3
df23 = df2 * df3
df123 = df1 * df2 * df3
dfe = f1_len*f2_len*f3_len*(df_lists_len - 1)
dft = df1 + df2 + df3 + df12 + df13 + df23 + df123 + dfe
    # compute the unbiased variances V
V1 = S1 / df1
V2 = S2 / df2
V3 = S3 / df3
V12 = S12 / df12
V13 = S13 / df13
V23 = S23 / df23
V123 = S123 / df123
Ve = Se / dfe
    # compute the F values
F1 = V1 / Ve
F2 = V2 / Ve
F3 = V3 / Ve
F12 = V12 / Ve
F13 = V13 / Ve
F23 = V23 / Ve
F123 = V123 / Ve
    # compute the p values
p1 = 1 - st.f.cdf(F1, dfn=df1, dfd=dfe)
p2 = 1 - st.f.cdf(F2, dfn=df2, dfd=dfe)
p3 = 1 - st.f.cdf(F3, dfn=df3, dfd=dfe)
p12 = 1 - st.f.cdf(F12, dfn=df12, dfd=dfe)
p13 = 1 - st.f.cdf(F13, dfn=df13, dfd=dfe)
p23 = 1 - st.f.cdf(F23, dfn=df23, dfd=dfe)
p123 = 1 - st.f.cdf(F123, dfn=df123, dfd=dfe)
    # build the ANOVA table
df_S = pd.Series([S1, S2, S3, S12, S13, S23, S123, Se, St])
df_df = pd.Series([df1, df2, df3, df12, df13, df23, df123, dfe, dft])
df_V = pd.Series([V1, V2, V3, V12, V13, V23, V123, Ve])
df_F = pd.Series([F1, F2, F3, F12, F13, F23, F123])
df_p = pd.DataFrame([p1, p2, p3, p12, p13, p23, p123], columns=['p'])
df_p['sign'] = df_p['p'].apply(lambda x : '**' if x < 0.01 else '*' if x < 0.05 else '')
df_ANOVA = pd.concat([df_S, df_df, df_V, df_F, df_p], axis=1).set_axis(['S','df','V','F','p','sign'], axis=1).set_axis(['Indexes', 'Columns', 'Tables', 'Indexes*Columns', 'Indexes*Tables', 'Columns*Tables', 'Indexes*Columns*Tables', 'Error', 'Total']).rename_axis('ANOVA_table', axis=1).fillna('')
    # collect the factor effects into DataFrames
df_effect_indexes = pd.DataFrame(f1_effect).set_axis(['mean'], axis=1)
df_effect_columns = | pd.DataFrame(f2_effect) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import mplfinance as mpl
plt.rcParams['legend.facecolor'] = 'darkgray'
############################## RETAIL SALES ##############################
def process_retailsales(path):
data = pd.read_csv(path, index_col=0, parse_dates=True)
data = data.drop('Date', axis=1)
data.IsHoliday = data.IsHoliday.apply(lambda x : int(x) if not pd.isna(x) else np.nan)
return data
def plot_retialsales(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.groupby('Store').Weekly_Sales.plot(title='Weekly Sales all stores')
_=plt.xlabel('Dates')
_=plt.ylabel('Qty')
############################## SUNSPOTS ##############################
def process_sunspots(path):
data=pd.read_csv(path, index_col=[0], usecols=[1,2], parse_dates=True)
data.index.name = 'Month'
data.columns = ['MMTS']
data = data.sort_index()
return data
def plot_sunspots(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.plot(title='Monthly Mean of Sunspots observed')
_=plt.title('Sunspots')
_=plt.ylabel('Mean Sunspots Numbers')
############################## USA ECONOMIC ##############################
def process_usaeconomic(path):
data = pd.read_csv(path, index_col=0, parse_dates=True)
data.index.name = 'Date'
return data
def plot_usaeconomic(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.plot(title='USA-Consumption, Income, Production, Savings & Unemployment Pct Changes')
############################## VISITORS TO 20 REGIONS ##############################
def process_20rvisitors(path):
data = pd.read_csv(path, index_col=0, parse_dates=True)
data.index=pd.to_datetime(data.index.str.replace(' ',''))
data.columns = ['Regions', 'Visitors']
data.index.name = 'Quarter'
return data
def plot_20rvisitors(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=sns.lineplot(x='Quarter', y='Visitors', hue='Regions', data=data.reset_index())
    _=plt.title('Quarterly Visitors to 20 regions in Australia')
_=plt.ylabel('Visitors (Million)')
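# Illustrative usage (my addition; the file name is hypothetical): each loader above returns a
# tidy frame with a DatetimeIndex that the matching plot_* helper can consume directly.
def example_visitors_report(path='visitors_20_regions.csv'):
    visitors = process_20rvisitors(path)
    plot_20rvisitors(visitors)
    return visitors.groupby('Regions')['Visitors'].describe()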
############################## ELECTRICITY PRODUCTION ##############################
def process_electricityprod(path):
data = | pd.read_csv(path, index_col=0, parse_dates=True) | pandas.read_csv |
#!/home/eee/ug/15084015/miniconda3/envs/btp/bin/python
from subprocess import call
import pdb
import pandas as pd
from datetime import datetime, timedelta
def scp(directory, date):
cmd = (
"scp -i /home/eee/ug/15084015/.ssh/btp.pem predictions/%s/%s.csv [email protected]:/var/www/html/btech_project/server/predictions/%s/"
% (directory, date, directory)
)
call(cmd.split(" "))
"""Simple Moving Average (SMA)"""
# pdb.set_trace()
p = 5 # number of days to take average of
n = 24 * 12 # hours * number of values per hour
time = ['%02d:%02d' % (x, y) for x in range(24) for y in range(0, 60, 5)]
#time = ['00:00', '00:05'...
data = pd.read_csv(
"monthdata.csv",
header=None,
index_col=["datetime"],
names=["datetime", "load"],
parse_dates=["datetime"],
infer_datetime_format=True,
)
# import pdb; pdb.set_trace()
print(data.index[-1])
date = datetime.today().date().strftime("%d-%m-%Y")
print('date today:', date)
load = data["load"].values
pred = [0] * n
for i in range(n):
forecast = 0
for j in range(1, p + 1):
forecast += load[-(j * n) + i] / p
pred[i] = (time[i], forecast)
df = pd.DataFrame.from_records(pred, columns=["time", "load"])
df.to_csv("predictions/SMA/%s.csv" % date, index=False)
scp("SMA", date)
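# Equivalent vectorized form of the SMA loop above, included only as an illustrative sketch:
# each 5-minute slot is forecast as the mean of that slot over the last p days.
def _sma_forecast(load_values, p=5, n=288):
    window = load_values[-p * n:]          # last p days, n readings per day
    return window.reshape(p, n).mean(axis=0)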
"""Simple Exponential Smoothing (SES)"""
dict = {}
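# For every 5-minute slot a separate smoothing factor alpha is picked from {0.1, ..., 0.9}
# by minimising the in-sample squared error; the most recent a (=10) days are then
# re-smoothed with the chosen alpha to produce the forecast.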
m = int(len(data) / n)
alpha = [0] * n
alphamin = [0] * n
forecast = [0] * len(data)
forecast[:n] = [load[j] for j in range(n)]
for j in range(n):
mse = [0] * 9
for k in range(1, 10):
alpha[j] = k * 0.1
mse[k - 1] += (forecast[j] - load[n + j]) ** 2
for i in range(2, m):
forecast[((i - 1) * n) + j] = (alpha[j] * load[((i - 1) * n) + j]) + (
(1 - alpha[j]) * forecast[((i - 2) * n) + j]
)
mse[k - 1] += (forecast[((i - 1) * n) + j] - load[(i * n) + j]) ** 2
min = mse[0]
alphamin[j] = 0.1
for i in range(1, 9):
if mse[i] < min:
min = mse[i]
alphamin[j] = (i + 1) * 0.1
a = 10
forecast2 = [0] * (a * n)
forecast2[:n] = [load[i - (a * n)] for i in range(n)]
for j in range(1, a):
for i in range(n):
forecast2[i + (j * n)] = (alphamin[i] * load[i - (a * n) + (j * n)]) + (
(1 - alphamin[i]) * forecast2[i + (j * n) - n]
)
pred = [0] * n
for i in range(n):
pred[i] = (time[i], forecast2[-n:][i])
labels = ["time", "load"]
df = pd.DataFrame.from_records(pred, columns=labels)
df.to_csv("predictions/SES/%s.csv" % date, index=False)
scp("SES", date)
"""Weighted Moving Average (WMA)"""
weights = [0.8019, 0.0426, 0.0226, -0.0169, 0.1497]
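# These fixed, pre-computed weights apply to the same time slot over the last 5 days
# (most recent day first); unlike the SMA above, each day contributes a different share.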
pred = [0] * n
for i in range(n):
forecast = 0
for j in range(1, len(weights) + 1):
forecast += load[-(j * n) + i] * weights[j - 1]
pred[i] = (time[i], forecast)
labels = ["time", "load"]
df = | pd.DataFrame.from_records(pred, columns=labels) | pandas.DataFrame.from_records |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
        unittest for function app_rate_parsing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
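    # Worked check of the expected values above (using the conc_initial formula quoted in the
    # docstring, conc_0 = app_rate * frac_act_ing * food_multiplier):
    #   0.34 * 0.34 * 110 = 12.7160
    #   0.78 * 0.84 * 15  =  9.8280
    #   2.34 * 0.02 * 240 = 11.2320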
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
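    # Worked check of the expected values above: they are consistent with one timestep of
    # first-order decay at the foliar dissipation half-life (assumption: a 1-day step), i.e.
    # conc_timestep = conc_0 * 0.5 ** (1 / foliar_diss_hlife):
    #   0.001 * 0.5 ** (1 / 0.25) = 6.25e-5
    #   0.1   * 0.5 ** (1 / 0.75) = 0.039685
    #   10.0  * 0.5 ** (1 / 0.01) = 7.8886e-30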
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = | pd.Series([15., 20., 30.], dtype='float') | pandas.Series |
# -*- coding: utf-8 -*-
import os
import pytest
import pandas as pd
from anacode.api import writers
from anacode.agg import aggregation as agg
@pytest.mark.parametrize('call_type,dataset_name,data', [
('categories', 'categories', pd.DataFrame([1])),
('concepts', 'concepts', pd.DataFrame([2])),
('concepts', 'concepts_surface_strings', | pd.DataFrame([3]) | pandas.DataFrame |
import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import os
import numpy as np
import pandas as pd
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse_gct as pg
import cmapPy.pandasGEXpress.write_gct as wg
FUNCTIONAL_TESTS_PATH = "../functional_tests"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestWriteGct(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create dfs to be used by tests
cls.data_df = pd.DataFrame(
[[1, 2, 3], [5, 7, np.nan], [13, 17, -19], [0, 23, 29]],
index=pd.Index(["rid1", "rid2", "rid3", "rid4"], name="rid"),
columns=pd.Index(["cid1", "cid2", "cid3"], name="cid"), dtype=np.float32)
cls.row_metadata_df = pd.DataFrame(
[["Analyte 11", 11, "dp52"],
["Analyte 12", 12, "dp52"],
["Analyte 13", 13, "dp53"],
["Analyte 14", 14, "dp54"]],
index=pd.Index(["rid1", "rid2", "rid3", "rid4"], name="rid"),
columns=pd.Index(["pr_analyte_id", "pr_analyte_num", "pr_bset_id"], name="rhd"))
cls.col_metadata_df = pd.DataFrame(
[[8.38, np.nan, "DMSO", "24 h"],
[7.7, np.nan, "DMSO", "24 h"],
[8.18, np.nan, "DMSO", "24 h"]],
index=pd.Index(["cid1", "cid2", "cid3"], name="cid"),
columns=pd.Index(["qc_iqr", "pert_idose", "pert_iname", "pert_itime"], name="chd"))
def test_write(self):
out_name = os.path.join(FUNCTIONAL_TESTS_PATH, "test_write_out.gct")
gctoo = GCToo.GCToo(data_df=self.data_df,
row_metadata_df=self.row_metadata_df,
col_metadata_df=self.col_metadata_df)
wg.write(gctoo, out_name, data_null="NaN",
metadata_null="-666", filler_null="-666")
# Read in the gct and verify that it's the same as gctoo
new_gct = pg.parse(out_name)
| pd.util.testing.assert_frame_equal(new_gct.data_df, gctoo.data_df) | pandas.util.testing.assert_frame_equal |
#%%
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import numpy as np
import pandas as pd
from itertools import product
import seaborn as sns
import matplotlib.gridspec as gridspec
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_fte_bte(err, single_err):
bte = [[] for i in range(10)]
te = [[] for i in range(10)]
fte = []
for i in range(10):
for j in range(i,10):
#print(err[j][i],j,i)
bte[i].append(err[i][i]/err[j][i])
te[i].append(single_err[i]/err[j][i])
for i in range(10):
fte.append(single_err[i]/err[i][i])
return fte,bte,te
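# Reading of get_fte_bte above: with err and single_err as returned by get_error_matrix,
#   FTE_i    = single_err[i] / err[i][i]
#   BTE_i(j) = err[i][i]     / err[j][i]   for j >= i
#   TE_i(j)  = single_err[i] / err[j][i]
# so TE_i(j) = FTE_i * BTE_i(j), and ratios above 1 mean the lifelong learner improves on the
# corresponding baseline (forward transfer into task i, backward transfer from later tasks).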
def calc_mean_bte(btes,task_num=10,reps=6):
mean_bte = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(btes[i][j])
tmp=tmp/reps
mean_bte[j].extend(tmp)
return mean_bte
def calc_mean_te(tes,task_num=10,reps=6):
mean_te = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(tes[i][j])
tmp=tmp/reps
mean_te[j].extend(tmp)
return mean_te
def calc_mean_fte(ftes,task_num=10,reps=6):
fte = np.asarray(ftes)
return list(np.mean(np.asarray(fte),axis=0))
def get_error_matrix(filename):
multitask_df, single_task_df = unpickle(filename)
err = [[] for _ in range(10)]
for ii in range(10):
err[ii].extend(
1 - np.array(
multitask_df[multitask_df['base_task']==ii+1]['accuracy']
)
)
single_err = 1 - np.array(single_task_df['accuracy'])
return single_err, err
def stratified_scatter(te_dict,axis_handle,s,color):
algo = list(te_dict.keys())
total_alg = len(algo)
total_points = len(te_dict[algo[0]])
pivot_points = np.arange(-.25, (total_alg+1)*1, step=1)
interval = .7/(total_points-1)
for algo_no,alg in enumerate(algo):
for no,points in enumerate(te_dict[alg]):
axis_handle.scatter(
pivot_points[algo_no]+interval*no,
te_dict[alg][no],
s=s,
c=color[algo_no]
)
#%%
### MAIN HYPERPARAMS ###
ntrees = 10
slots = 10
task_num = 10
shifts = 6
total_alg = 9
alg_name = ['L2N','L2F','L2F-','Prog-NN', 'DF-CNN','LwF','EWC','O-EWC','SI']
model_file_500 = ['dnn0','fixed_uf10','uf10','Prog_NN','DF_CNN', 'LwF','EWC', 'Online_EWC', 'SI']
model_file_5000 = ['dnn0','fixed_uf5000_40','uf5000_40','Prog_NN','DF_CNN', 'LwF','EWC', 'Online_EWC', 'SI']
btes_500 = [[] for i in range(total_alg)]
ftes_500 = [[] for i in range(total_alg)]
tes_500 = [[] for i in range(total_alg)]
btes_5000 = [[] for i in range(total_alg)]
ftes_5000 = [[] for i in range(total_alg)]
tes_5000 = [[] for i in range(total_alg)]
########################
#%% code for 5000 samples
reps = shifts
for alg in range(total_alg):
count = 0
te_tmp = [[] for _ in range(reps)]
bte_tmp = [[] for _ in range(reps)]
fte_tmp = [[] for _ in range(reps)]
for shift in range(shifts):
if alg < 3:
filename = 'result/result/'+model_file_5000[alg]+'_'+str(shift+1)+'_0'+'.pickle'
else:
filename = 'benchmarking_algorthms_result/'+model_file_5000[alg]+'_'+str(shift+1)+'.pickle'
multitask_df, single_task_df = unpickle(filename)
single_err, err = get_error_matrix(filename)
fte, bte, te = get_fte_bte(err,single_err)
te_tmp[count].extend(te)
bte_tmp[count].extend(bte)
fte_tmp[count].extend(fte)
count+=1
tes_5000[alg].extend(calc_mean_te(te_tmp,reps=reps))
btes_5000[alg].extend(calc_mean_bte(bte_tmp,reps=reps))
ftes_5000[alg].extend(calc_mean_fte(fte_tmp,reps=reps))
#%% code for 500 samples
reps = slots*shifts
for alg in range(total_alg):
count = 0
te_tmp = [[] for _ in range(reps)]
bte_tmp = [[] for _ in range(reps)]
fte_tmp = [[] for _ in range(reps)]
for slot in range(slots):
for shift in range(shifts):
if alg < 3:
filename = 'result/result/'+model_file_500[alg]+'_'+str(shift+1)+'_'+str(slot)+'.pickle'
else:
filename = 'benchmarking_algorthms_result/'+model_file_500[alg]+'_'+str(shift+1)+'_'+str(slot)+'.pickle'
multitask_df, single_task_df = unpickle(filename)
single_err, err = get_error_matrix(filename)
fte, bte, te = get_fte_bte(err,single_err)
te_tmp[count].extend(te)
bte_tmp[count].extend(bte)
fte_tmp[count].extend(fte)
count+=1
tes_500[alg].extend(calc_mean_te(te_tmp,reps=reps))
btes_500[alg].extend(calc_mean_bte(bte_tmp,reps=reps))
ftes_500[alg].extend(calc_mean_fte(fte_tmp,reps=reps))
#%%
te_500 = {'L2N':np.zeros(10,dtype=float), 'L2F':np.zeros(10,dtype=float),'L2Fc':np.zeros(10,dtype=float), 'Prog-NN':np.zeros(10,dtype=float), 'DF-CNN':np.zeros(10,dtype=float), 'LwF':np.zeros(10,dtype=float),'EWC':np.zeros(10,dtype=float), 'Online EWC':np.zeros(10,dtype=float), 'SI':np.zeros(10,dtype=float)}
for count,name in enumerate(te_500.keys()):
for i in range(10):
te_500[name][i] = tes_500[count][i][9-i]
df_500 = pd.DataFrame.from_dict(te_500)
df_500 = pd.melt(df_500,var_name='Algorithms', value_name='Transfer Efficieny')
'''mean_te = {'L2N':[np.mean(te['L2N'])],'L2F':[np.mean(te['L2F'])], 'L2Fc':[np.mean(te['L2Fc'])],
'Prog-NN':[np.mean(te['Prog-NN'])],'DF-CNN':[np.mean(te['DF-CNN'])],
'LwF':[np.mean(te['LwF'])],'EWC':[np.mean(te['EWC'])],
'Online EWC':[np.mean(te['Online EWC'])], 'SI':[np.mean(te['SI'])]
}
mean_df = pd.DataFrame.from_dict(mean_te)
mean_df = pd.melt(mean_df,var_name='Algorithms', value_name='Transfer Efficieny')'''
#%%
te_5000 = {'L2N':np.zeros(10,dtype=float), 'L2F':np.zeros(10,dtype=float),'L2Fc':np.zeros(10,dtype=float), 'Prog-NN':np.zeros(10,dtype=float), 'DF-CNN':np.zeros(10,dtype=float), 'LwF':np.zeros(10,dtype=float),'EWC':np.zeros(10,dtype=float), 'Online EWC':np.zeros(10,dtype=float), 'SI':np.zeros(10,dtype=float)}
for count,name in enumerate(te_5000.keys()):
for i in range(10):
te_5000[name][i] = tes_5000[count][i][9-i]
df_5000 = | pd.DataFrame.from_dict(te_5000) | pandas.DataFrame.from_dict |
import cv2
import numpy as np
import base64
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime, time, timedelta, date
import wget
from zipfile import ZipFile
import os
import json
import plotly.express as px
import joblib
# pip install streamlit --upgrade
# pip install streamlit==0.78.0
class Inference:
def __init__(self,model_path="model/model.pkl"):
        # NOTE (assumption): region names restored to the official dpc-covid19 dataset labels; the source had redacted placeholders here.
        self.nomi_regioni = ['Abruzzo', 'Basilicata', 'Calabria', 'Campania', 'Emilia-Romagna', 'Friuli Venezia Giulia', 'Lazio', 'Liguria', 'Lombardia', 'Marche', 'Molise', 'P.A. Bolzano', 'P.A. Trento', 'Piemonte', 'Puglia', 'Sardegna', 'Sicilia', 'Toscana', 'Umbria', "Valle d'Aosta", 'Veneto']
dict_names = {"bianca":0,"gialla": 1, "arancione": 2, "rossa": 3}
self.names = list(dict_names)
self.model = joblib.load(model_path)
def predict(self,inputs, regione):
idx = self.nomi_regioni.index(regione)
v = [ 0 for i in range(0,len(self.nomi_regioni))]
v[idx] = 1
inputs.extend(v)
        X = np.array(inputs, dtype=float).reshape(1, -1)  # np.float alias is removed in recent NumPy; use the builtin float
Y_hat = self.model.predict(X)
return self.names[int(Y_hat[0])]
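# Minimal usage sketch for Inference (assumption: model/model.pkl exists and was trained on the
# 10 numeric features in the order returned by get_input_prediction, followed by the one-hot
# region block that predict() appends):
#   inf = Inference()
#   features = list(get_input_prediction("Lombardia"))
#   print(inf.predict(features, "Lombardia"))  # -> 'bianca' | 'gialla' | 'arancione' | 'rossa'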
def fig_stats_variation(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Variazione Giornaliera"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
# Script to aggregate data
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.resample.html
dft = df.copy()
dft = dft.set_index("data")
dft["count"] = [1 for i in range(0,len(df))]
agg = {"count" : "size"}
for s in options:
agg[s] = "median"
dft = dft.resample('1D').agg(agg)
# Variation daily
df = {"data": dft.index[1:]}
for s in options:
start = dft[s][:-1].values
end = dft[s][1:].values
df[s] = ( end - start )
#df[s] = np.round( ( end / start -1 )*100,2)
df = pd.DataFrame(df)
df = df.set_index("data")
#dft.dropna()
#print(dft.head())
# Rolling average variation
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
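# Design note on fig_stats_variation: the daily variation is computed by resampling to one value
# per day and subtracting consecutive days by hand; an equivalent pandas-native sketch (not the
# code used above) would be:
#   daily = dft[options].resample("1D").median()
#   variation = daily.diff().dropna()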
def fig_stats(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Andamento Cumulativo"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df.set_index("data")
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
df = df.set_index("data")
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
def get_stats(regione,data_inizio, data_fine):
select = ["deceduti","totale_casi","dimessi_guariti","variazione_totale_positivi"]
df = None
if regione=="Italia":
df = get_data_nazione()
else:
df = get_data_regioni()
df = df[df["denominazione_regione"]==regione]
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
incremento = ( df.iloc[-1,:][select] - df.iloc[-2,:][select] ) .to_dict()
data = ( df.iloc[-1,:][select]) .to_dict()
df = pd.DataFrame ([data,incremento],columns=select, index=["Situazione","Incremento"])
df = df.rename(columns={"deceduti": "Deceduti", "totale_casi": "Totale Casi", "dimessi_guariti": "Dimessi Guariti","variazione_totale_positivi" : "Var. Totale Positivi" })
return df
def get_nomi_regioni():
df = get_data_regioni()
#df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df["denominazione_regione"].unique().tolist()
def get_options():
select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
return select
def get_date():
df = get_data_nazione()
start = df["data"].tolist()[0]
end= df["data"].tolist()[-1]
d = end
date = []
date.append(d.strftime("%Y-%m-%d"))
while (d>start):
t = d -timedelta(days=0, weeks=1)
date.append(t.strftime("%Y-%m-%d"))
d = t
#date = [ d.strftime("%Y-%m-%d") for d in df["data"].dt.date]
return date
def get_data_nazione():
'''
Keys: ['data', 'stato', 'ricoverati_con_sintomi', 'terapia_intensiva',
'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi',
'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti',
'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening',
'totale_casi', 'tamponi', 'casi_testati', 'note',
'ingressi_terapia_intensiva', 'note_test', 'note_casi',
'totale_positivi_test_molecolare',
'totale_positivi_test_antigenico_rapido', 'tamponi_test_molecolare',
'tamponi_test_antigenico_rapido']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
url = "data/dpc-covid19-ita-andamento-nazionale.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def get_data_province():
'''
Keys: ['data', 'stato', 'codice_regione', 'denominazione_regione',
'codice_provincia', 'denominazione_provincia', 'sigla_provincia', 'lat',
'long', 'totale_casi', 'note', 'codice_nuts_1', 'codice_nuts_2',
'codice_nuts_3']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
url = "data/dpc-covid19-ita-province.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def get_data_regioni():
'''
Keys: ['data', 'stato', 'codice_regione', 'denominazione_regione', 'lat',
'long', 'ricoverati_con_sintomi', 'terapia_intensiva',
'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi',
'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti',
'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening',
'totale_casi', 'tamponi', 'casi_testati', 'note',
'ingressi_terapia_intensiva', 'note_test', 'note_casi',
'totale_positivi_test_molecolare',
'totale_positivi_test_antigenico_rapido', 'tamponi_test_molecolare',
'tamponi_test_antigenico_rapido', 'codice_nuts_1', 'codice_nuts_2']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
url = "data/dpc-covid19-ita-regioni.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def create_dataset(df_p):
    df = df_p.copy()  # the original body used an undefined name `df`; work on a copy of the argument
    # Data string to datetime
    # Dates are encoded as strings; we want them as datetime
    df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
    # Filter Emilia-Romagna
    df = df[df["denominazione_regione"]=="Emilia-Romagna"]
    # Dataset (last 4 weeks)
    data_end = df["data"].tolist()[-1] # today's date
    data_start = data_end - timedelta(days=0,weeks=2,hours=0,minutes=0)
    df_f = df[ (df["data"]>=data_start) & (df["data"]<=data_end) ]
    # Dataset (last 2 weeks)
    data_end = df["data"].tolist()[-1] # today's date
    data_start = data_end - timedelta(days=0,weeks=1,hours=0,minutes=0)
    df_ff = df[ (df["data"]>=data_start) & (df["data"]<=data_end) ]
    # Regional indicators for Emilia-Romagna
    # id1 total cases over the last 2 weeks
    i1 = df_f["totale_casi"]
    # id2 hospitalised with symptoms over the last 2 weeks
    i2 = df_f["ricoverati_con_sintomi"]
    # id3 intensive care over the last 2 weeks
    i3 = df_f["terapia_intensiva"]
    # id4 home isolation
    i4 = df_f["isolamento_domiciliare"]
    # id7 share of positive swabs
    i7 = ( df_f["totale_positivi_test_molecolare"] + df_f["totale_positivi_test_antigenico_rapido"] ) / df_f["tamponi"]
    # number of deaths in the 2 weeks
    e1 = df_f["deceduti"]
    i12 = df_f["casi_da_sospetto_diagnostico"]
    i13 = df_ff["totale_casi"]
    # NOTE: the function appears unfinished upstream; the indicators above are computed but never returned
def get_data_locally():
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-province.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-regioni.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-andamento-nazionale.csv")
def get_input_prediction(regione):
df = pd.read_csv("data/dpc-covid19-ita-regioni-zone.csv")
df = df[df["denominazione_regione"]==regione]
last = df.tail(1)
ricoverati_con_sintomi = last["ricoverati_con_sintomi"].tolist()[0]
terapia_intensiva = last["terapia_intensiva"].tolist()[0]
totale_ospedalizzati = last["totale_ospedalizzati"].tolist()[0]
totale_positivi = last["totale_positivi"].tolist()[0]
isolamento_domiciliare = last["isolamento_domiciliare"].tolist()[0]
deceduti = last["deceduti"].tolist()[0]
dimessi_guariti = last["dimessi_guariti"].tolist()[0]
nuovi_positivi = last["nuovi_positivi"].tolist()[0]
totale_casi = last["totale_casi"].tolist()[0]
tamponi = last["tamponi"].tolist()[0]
return ricoverati_con_sintomi,terapia_intensiva,totale_ospedalizzati,totale_positivi,isolamento_domiciliare,deceduti,dimessi_guariti,nuovi_positivi,totale_casi,tamponi
def get_map():
df = pd.read_csv("data/dpc-covid19-ita-regioni-zone.csv")
df["data"] = [ datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in df["data"]]
update_date = df["data"].tolist()[-1]
df = df[df["data"].dt.date==update_date]
regions = df["denominazione_regione"].tolist()
colors = df["zona"].tolist()
# https://codicicolori.com/codici-colori-rgb
color_discrete_map = {'unknown': 'rgb(125,125,0)', 'bianca': 'rgb(255,255,255)', 'gialla': 'rgb(255,255,108)', 'arancione': 'rgb(255,165,0)','rossa': 'rgb(255,0,0)'}
df = pd.DataFrame(regions, columns=['Regione'])
df['zona'] =colors
with open('data/regioni.geojson') as f:
italy_regions_geo = json.load(f)
# Choropleth representing the length of region names
fig = px.choropleth(data_frame=df,
geojson=italy_regions_geo,
locations='Regione', # name of dataframe column
featureidkey='properties.NOME_REG', # path to field in GeoJSON feature object with which to match the values passed in to locations
color="zona",
color_discrete_map=color_discrete_map,
scope="europe",
)
fig.update_geos(showcountries=False, showcoastlines=False, showland=False, fitbounds="locations")
title = "Situazione Italiana: " + update_date.strftime("%Y-%m-%d" )
fig.update_layout(title=title) #),margin={"r":0,"t":0,"l":0,"b":0})
return fig
def get_zone_table(regione):
#df = pd.read_csv("data/dpc-covid-19-aree.csv")
df = pd.read_csv("data/dpc-covid19-ita-regioni-zone.csv")
df["data"] = [ datetime.strptime(d, "%Y-%m-%d %H:%M:%S").date() for d in df["data"]]
df = df.sort_values(by=["data"],ascending=False)
if regione!="Italia":
df = df[df["denominazione_regione"]==regione]
inputs = ["data","denominazione_regione","zona","tamponi","deceduti","totale_casi","dimessi_guariti","totale_ospedalizzati","ricoverati_con_sintomi","totale_positivi","isolamento_domiciliare","terapia_intensiva","nuovi_positivi"]
df = df[inputs]
return df
def collect_data():
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-andamento-nazionale.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-regioni.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-province.csv")
'''
# bash
wget https://github.com/pcm-dpc/COVID-19/raw/master/aree/geojson/dpc-covid-19-aree-nuove-g-json.zip
unzip dpc-covid-19-aree-nuove-g-json.zip
'''
url = 'https://github.com/pcm-dpc/COVID-19/raw/master/aree/geojson/dpc-covid-19-aree-nuove-g-json.zip'
filenameZip = wget.download(url,out="data/dpc-covid-19-aree-nuove-g-json.zip")
with ZipFile(filenameZip, 'r') as zipObj:
zipObj.extractall("data/")
print('File is unzipped')
zipObj.close()
os.remove(filenameZip)
# Process json file to retrieve data
with open('data/dpc-covid-19-aree-nuove-g.json') as f:
data = json.load(f)
data_dict = {
"regione" : [],
"data_inizio": [],
"data_fine": [],
"colore" : [],
"link": []
}
color_dict = {"art.1": "gialla","art.2": "arancione","art.3": "rossa", "art.1 comma 11" : "bianca" }
lista_regioni = []
for d in data["features"]:
p = d["properties"]
data_inizio = datetime.strptime(p["datasetIni"], "%d/%m/%Y")
if p["datasetFin"]==" ":
data_fine = datetime.now() #.date()
else:
data_fine = datetime.strptime(p["datasetFin"], "%d/%m/%Y") #.date()
regione = p["nomeTesto"]
colore = color_dict[ p["legSpecRif"] ]
#if (data_inizio not in data_dict["data_inizio"]):
data_dict["regione"].append(regione)
data_dict["data_inizio"].append(data_inizio)
data_dict["data_fine"].append(data_fine)
data_dict["colore"].append(colore)
data_dict["link"].append(p["legLink"])
df = pd.DataFrame(data_dict)
df.to_csv("data/dpc-covid-19-aree.csv")
# Update dataset Regioni
df_r = | pd.read_csv("data/dpc-covid19-ita-regioni.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Pipeline-GUI for Analysis with MNE-Python
@author: <NAME>
@email: <EMAIL>
@github: https://github.com/marsipu/mne_pipeline_hd
License: BSD (3-clause)
Written on top of MNE-Python
Copyright © 2011-2020, authors of MNE-Python (https://doi.org/10.3389/fnins.2013.00267)
inspired by <NAME>. (2018) (https://doi.org/10.3389/fnins.2018.00006)
"""
from ast import literal_eval
from datetime import datetime
import pandas as pd
from PyQt5.QtCore import QAbstractItemModel, QAbstractListModel, QAbstractTableModel, QModelIndex, Qt
from PyQt5.QtGui import QBrush, QFont
from PyQt5.QtWidgets import QStyle
from mne_pipeline_hd.gui.gui_utils import get_std_icon
class BaseListModel(QAbstractListModel):
""" A basic List-Model
Parameters
----------
data : list()
input existing list here, otherwise defaults to empty list
show_index : bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data=None, show_index=False, drag_drop=False, **kwargs):
super().__init__(**kwargs)
self.show_index = show_index
self.drag_drop = drag_drop
if data is None:
self._data = list()
else:
self._data = data
def getData(self, index):
return self._data[index.row()]
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
elif role == Qt.EditRole:
return str(self.getData(index))
def rowCount(self, index=QModelIndex()):
return len(self._data)
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
n = 0
for pos in range(row, row + count):
item_name = f'__new{n}__'
while item_name in self._data:
n += 1
item_name = f'__new{n}__'
self._data.insert(pos, item_name)
self.endInsertRows()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
for item in [self._data[i] for i in range(row, row + count)]:
self._data.remove(item)
self.endRemoveRows()
return True
def flags(self, index):
default_flags = QAbstractListModel.flags(self, index)
if self.drag_drop:
if index.isValid():
return default_flags | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
else:
return default_flags | Qt.ItemIsDropEnabled
else:
return default_flags
def supportedDragActions(self):
if self.drag_drop:
return Qt.CopyAction | Qt.MoveAction
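# Minimal usage sketch for BaseListModel (assumption: a running QApplication and a QListView
# called `view` exist somewhere in the GUI code):
#   model = BaseListModel(['sub-01', 'sub-02'], show_index=True)
#   view.setModel(model)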
class EditListModel(BaseListModel):
"""An editable List-Model
Parameters
----------
data : list()
input existing list here, otherwise defaults to empty list
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data, show_index=False, drag_drop=False, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
def flags(self, index=QModelIndex()):
default_flags = BaseListModel.flags(self, index)
if index.isValid():
return default_flags | Qt.ItemIsEditable
else:
return default_flags
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
self._data[index.row()] = literal_eval(value)
except (ValueError, SyntaxError):
self._data[index.row()] = value
self.dataChanged.emit(index, index)
return True
return False
class CheckListModel(BaseListModel):
"""
A Model for a Check-List
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
checked : list()
list which stores the checked items from data
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
"""
def __init__(self, data, checked, one_check=False, show_index=False, drag_drop=False, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
self.one_check = one_check
if data is None:
self._data = list()
else:
self._data = data
if checked is None:
self._checked = list()
else:
self._checked = checked
def getChecked(self, index=QModelIndex()):
        return self._checked[index.row()]  # the attribute is stored as self._checked
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
if role == Qt.CheckStateRole:
if self.getData(index) in self._checked:
return Qt.Checked
else:
return Qt.Unchecked
def setData(self, index, value, role=None):
if role == Qt.CheckStateRole:
if value == Qt.Checked:
if self.one_check:
self._checked.clear()
self._checked.append(self.getData(index))
else:
if self.getData(index) in self._checked:
self._checked.remove(self.getData(index))
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index=QModelIndex()):
return QAbstractItemModel.flags(self, index) | Qt.ItemIsUserCheckable
class CheckDictModel(BaseListModel):
"""
A Model for a list, which marks items which are present in a dictionary
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
check_dict : dict()
dictionary which may contain items from data as keys
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
yes_bt: str
Supply the name for a qt-standard-icon to mark the items existing in check_dict
no_bt: str
Supply the name for a qt-standard-icon to mark the items not existing in check_dict
Notes
-----
Names for QT standard-icons:
https://doc.qt.io/qt-5/qstyle.html#StandardPixmap-enum
"""
def __init__(self, data, check_dict, show_index=False, drag_drop=False,
yes_bt=None, no_bt=None, **kwargs):
super().__init__(data, show_index, drag_drop, **kwargs)
self._check_dict = check_dict
self.yes_bt = yes_bt or 'SP_DialogApplyButton'
self.no_bt = no_bt or 'SP_DialogCancelButton'
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.show_index:
return f'{index.row()}: {self.getData(index)}'
else:
return str(self.getData(index))
elif role == Qt.EditRole:
return str(self.getData(index))
elif role == Qt.DecorationRole:
if self.getData(index) in self._check_dict:
return get_std_icon(self.yes_bt)
else:
return get_std_icon(self.no_bt)
class CheckDictEditModel(CheckDictModel, EditListModel):
"""An editable List-Model
Parameters
----------
data : list()
list with content to be displayed, defaults to empty list
check_dict : dict()
dictionary which may contain items from data as keys
show_index: bool
Set True if you want to display the list-index in front of each value
drag_drop: bool
Set True to enable Drag&Drop.
yes_bt: str
Supply the name for a qt-standard-icon to mark the items existing in check_dict
no_bt: str
Supply the name for a qt-standard-icon to mark the items not existing in check_dict
Notes
-----
Names for QT standard-icons:
https://doc.qt.io/qt-5/qstyle.html#StandardPixmap-enum
"""
def __init__(self, data, check_dict, show_index=False, drag_drop=False,
yes_bt=None, no_bt=None):
super().__init__(data, check_dict, show_index, drag_drop, yes_bt, no_bt)
# EditListModel doesn't have to be initialized because in __init__ of EditListModel
# only BaseListModel is initialized which is already done in __init__ of CheckDictModel
class BaseDictModel(QAbstractTableModel):
"""Basic Model for Dictonaries
Parameters
----------
data : dict | OrderedDict | None
Dictionary with keys and values to be displayed, default to empty Dictionary
Notes
-----
Python 3.7 is required to ensure order in dictionary when inserting a normal dict (or use OrderedDict)
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
if data is None:
self._data = dict()
else:
self._data = data
def getData(self, index=QModelIndex()):
try:
if index.column() == 0:
return list(self._data.keys())[index.row()]
elif index.column() == 1:
return list(self._data.values())[index.row()]
# Happens, when a duplicate key is entered
except IndexError:
self.layoutChanged.emit()
return ''
def data(self, index, role=None):
if role == Qt.DisplayRole or role == Qt.EditRole:
return str(self.getData(index))
def headerData(self, idx, orientation, role=None):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if idx == 0:
return 'Key'
elif idx == 1:
return 'Value'
elif orientation == Qt.Vertical:
return str(idx)
def rowCount(self, index=QModelIndex()):
return len(self._data)
def columnCount(self, index=QModelIndex()):
return 2
class EditDictModel(BaseDictModel):
"""An editable model for Dictionaries
Parameters
----------
data : dict | OrderedDict | None
Dictionary with keys and values to be displayed, default to empty Dictionary
only_edit : 'keys' | 'values' | None
Makes only keys or only values editable. Both are editable if None.
Notes
-----
Python 3.7 is required to ensure order in dictionary when inserting a normal dict (or use OrderedDict)
"""
def __init__(self, data=None, only_edit=None, **kwargs):
super().__init__(data, **kwargs)
self.only_edit = only_edit
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
value = literal_eval(value)
except (SyntaxError, ValueError):
pass
if index.column() == 0:
self._data[value] = self._data.pop(list(self._data.keys())[index.row()])
elif index.column() == 1:
self._data[list(self._data.keys())[index.row()]] = value
else:
return False
self.dataChanged.emit(index, index, [role])
return True
return False
def flags(self, index=QModelIndex()):
if not self.only_edit:
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
elif index.column() == 0 and self.only_edit == 'keys':
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
elif index.column() == 1 and self.only_edit == 'values':
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
else:
return QAbstractItemModel.flags(self, index)
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
for n in range(count):
key_name = f'__new{n}__'
while key_name in self._data.keys():
n += 1
key_name = f'__new{n}__'
self._data[key_name] = ''
self.endInsertRows()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
for n in range(count):
self._data.pop(list(self._data.keys())[row + n])
self.endRemoveRows()
return True
class BasePandasModel(QAbstractTableModel):
"""Basic Model for pandas DataFrame
Parameters
----------
data : pandas.DataFrame | None
pandas DataFrame with contents to be displayed, defaults to empty DataFrame
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
if data is None:
self._data = pd.DataFrame([])
else:
self._data = data
def getData(self, index=QModelIndex()):
return self._data.iloc[index.row(), index.column()]
def data(self, index, role=None):
if role == Qt.DisplayRole or role == Qt.EditRole:
return str(self.getData(index))
def headerData(self, idx, orientation, role=None):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return str(self._data.columns[idx])
elif orientation == Qt.Vertical:
return str(self._data.index[idx])
def rowCount(self, index=QModelIndex()):
return len(self._data.index)
def columnCount(self, index=QModelIndex()):
return len(self._data.columns)
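# Minimal usage sketch for BasePandasModel (assumption: a running QApplication and a QTableView
# called `table` exist):
#   model = BasePandasModel(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}))
#   table.setModel(model)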
class EditPandasModel(BasePandasModel):
""" Editable TableModel for Pandas DataFrames
Parameters
----------
data : pandas.DataFrame | None
pandas DataFrame with contents to be displayed, defaults to empty DataFrame
Notes
-----
The reference of the original input-DataFrame is lost when edited by this Model,
you need to retrieve it directly from the model after editing
"""
def __init__(self, data=None, **kwargs):
super().__init__(data, **kwargs)
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
value = literal_eval(value)
# List or Dictionary not allowed here as PandasDataFrame-Item
if isinstance(value, dict) or isinstance(value, list):
value = str(value)
except (SyntaxError, ValueError):
pass
self._data.iloc[index.row(), index.column()] = value
self.dataChanged.emit(index, index, [role])
return True
return False
def setHeaderData(self, index, orientation, value, role=Qt.EditRole):
if role == Qt.EditRole:
if orientation == Qt.Vertical:
# DataFrame.rename does rename all duplicate indices if existent,
# that's why the index is reassigned directly
new_index = list(self._data.index)
new_index[index] = value
self._data.index = new_index
self.headerDataChanged.emit(Qt.Vertical, index, index)
return True
elif orientation == Qt.Horizontal:
# DataFrame.rename does rename all duplicate columns if existent,
# that's why the columns are reassigned directly
new_columns = list(self._data.columns)
new_columns[index] = value
self._data.columns = new_columns
self.headerDataChanged.emit(Qt.Horizontal, index, index)
return True
return False
def flags(self, index=QModelIndex()):
return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
add_data = pd.DataFrame(columns=self._data.columns, index=[r for r in range(count)])
if row == 0:
self._data = pd.concat([add_data, self._data])
elif row == len(self._data.index):
            self._data = pd.concat([self._data, add_data])  # DataFrame.append is removed in recent pandas
else:
self._data = pd.concat([self._data.iloc[:row], add_data, self._data.iloc[row:]])
self.endInsertRows()
return True
def insertColumns(self, column, count, index=QModelIndex()):
self.beginInsertColumns(index, column, column + count - 1)
add_data = pd.DataFrame(index=self._data.index, columns=[c for c in range(count)])
if column == 0:
self._data = pd.concat([add_data, self._data], axis=1)
elif column == len(self._data.columns):
self._data = pd.concat([self._data, add_data], axis=1)
else:
self._data = pd.concat([self._data.iloc[:, :column], add_data, self._data.iloc[:, column:]], axis=1)
self.endInsertColumns()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
# Can't use DataFrame.drop() here, because there could be rows with similar index-labels
if row == 0:
self._data = self._data.iloc[row + count:]
elif row + count >= len(self._data.index):
self._data = self._data.iloc[:row]
else:
self._data = pd.concat([self._data.iloc[:row], self._data.iloc[row + count:]])
self.endRemoveRows()
return True
def removeColumns(self, column, count, index=QModelIndex()):
self.beginRemoveColumns(index, column, column + count - 1)
# Can't use DataFrame.drop() here, because there could be columns with similar column-labels
if column == 0:
self._data = self._data.iloc[:, column + count:]
elif column + count >= len(self._data.columns):
self._data = self._data.iloc[:, :column]
else:
self._data = | pd.concat([self._data.iloc[:, :column], self._data.iloc[:, column + count:]], axis=1) | pandas.concat |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.tests.core import ExecutorForTest, TestBase
from mars.tensor import tensor
from mars.dataframe import Series, DataFrame
class Test(TestBase):
def setUp(self) -> None:
super().setUp()
self.executor = ExecutorForTest('numpy')
def testSeriesQuantileExecution(self):
raw = pd.Series(np.random.rand(10), name='a')
a = Series(raw, chunk_size=3)
# q = 0.5, scalar
r = a.quantile()
result = self.executor.execute_dataframe(r, concat=True)[0]
expected = raw.quantile()
self.assertEqual(result, expected)
# q is a list
r = a.quantile([0.3, 0.7])
result = self.executor.execute_dataframe(r, concat=True)[0]
expected = raw.quantile([0.3, 0.7])
pd.testing.assert_series_equal(result, expected)
# test interpolation
r = a.quantile([0.3, 0.7], interpolation='midpoint')
result = self.executor.execute_dataframe(r, concat=True)[0]
expected = raw.quantile([0.3, 0.7], interpolation='midpoint')
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000 ; pandas.datetime is removed in recent pandas
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN); NOTE: the result is not assigned, so this call is currently a no-op
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # Series.dtypes is a dtype object, never a str, so the original isinstance check could not match
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterating a DataFrame directly yields only column names
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
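    # Worked example for _calculateMean above: numpy.average with uniform weights 1/n gives
    # sum(x_i * (1/n)) / sum(1/n) = mean(x); e.g. for x = [2, 4, 6] each weight is 1/3 and the
    # result is (2 + 4 + 6) / 3 = 4.0, sidestepping the direct division by x.size that the
    # docstrings say produced infinity/NaN.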
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication method, since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulate squared deviations; seeding the sum with the mean would bias the result
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):  # '== numpy.nan' is always False; use isnan instead
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):  # identity comparison against NaN never matches; use isnan instead
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)  # start from the input frame; dataAnalysisCleaned is still None at this point
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": | pandas.StringDtype() | pandas.StringDtype |