| prompt | completion | api |
|---|---|---|
| string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90 |
"""
Written by <NAME>, 22-10-2018
This script contains functions for data formatting and accuracy assessment of keras models
"""
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import keras.backend as K
from math import sqrt
import numpy as np
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
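# Hedged usage sketch (added for illustration, not part of the original script):
# the helper name _demo_series_to_supervised is hypothetical.
def _demo_series_to_supervised():
    toy = np.arange(5.0).reshape(-1, 1)  # one variable observed at five time steps
    framed = series_to_supervised(toy, n_in=2, n_out=1)
    # columns are var1(t-2), var1(t-1), var1(t); after dropping NaN rows the
    # first row is [0.0, 1.0, 2.0]
    return framed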
# model cost function
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
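# Hedged usage sketch (not part of the original script): the rmse function above
# can be passed to Keras as a loss and/or metric. The layer sizes and the helper
# name _demo_compile_with_rmse are hypothetical; only the compile call
# illustrates the intended use with the [samples, 1, features] input shape
# produced below.
def _demo_compile_with_rmse(n_features=3, n_ahead=18):
    from keras.models import Sequential
    from keras.layers import LSTM, Dense
    model = Sequential()
    model.add(LSTM(20, input_shape=(1, n_features)))  # one timestep, n_features inputs
    model.add(Dense(n_ahead))                          # one output per forecast step
    model.compile(optimizer='adam', loss=rmse, metrics=[rmse])
    return model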
# scale and format observed data as train/test inputs/labels
def format_obs_data(full_data, n_lags, n_ahead, n_train):
# split datetime column into train and test for plots
train_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = full_data[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
values = full_data[['GWL', 'Tide', 'Precip.']].values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_fit = gwl_scaler.fit(gwl)
gwl_scaled = gwl_fit.transform(gwl)
tide_fit = tide_scaler.fit(tide)
tide_scaled = tide_fit.transform(tide)
rain_fit = rain_scaler.fit(rain)
rain_scaled = rain_fit.transform(rain)
# frame as supervised learning
gwl_super = series_to_supervised(gwl_scaled, n_lags, n_ahead)
gwl_super_values = gwl_super.values
tide_super = series_to_supervised(tide_scaled, n_lags, n_ahead)
tide_super_values = tide_super.values
rain_super = series_to_supervised(rain_scaled, n_lags, n_ahead)
rain_super_values = rain_super.values
# split groundwater into inputs and labels
gwl_input, gwl_labels = gwl_super_values[:, 0:n_lags+1], gwl_super_values[:, n_lags+1:]
# split into train and test sets
train_X = np.concatenate((gwl_input[:n_train, :], tide_super_values[:n_train, :], rain_super_values[:n_train, :]),
axis=1)
test_X = np.concatenate((gwl_input[n_train:, :], tide_super_values[n_train:, :], rain_super_values[n_train:, :]),
axis=1)
train_y, test_y = gwl_labels[:n_train, :], gwl_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_dates, test_dates, tide_fit, rain_fit, gwl_fit, train_X, test_X, train_y, test_y
# scale and format storm data as train/test inputs/labels
def format_storm_data(storm_data, n_train, tide_fit, rain_fit, gwl_fit):
# separate storm data into gwl, tide, and rain
storm_scaled = pd.DataFrame(storm_data["Datetime"])
for col in storm_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
storm_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(storm_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
storm_scaled[col] = col_scaled
# split storm data into inputs and labels
storm_values = storm_scaled[storm_scaled.columns[1:]].values
storm_input, storm_labels = storm_values[:, :-18], storm_values[:, -18:]
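# Note (added for clarity): the trailing 18 columns are assumed to be the
# gwl(t+1) ... gwl(t+18) targets, i.e. an 18-step-ahead forecast horizon.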
# split into train and test sets
train_X, test_X = storm_input[:n_train, :], storm_input[n_train:, :]
train_y, test_y = storm_labels[:n_train, :], storm_labels[n_train:, :]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print("observed training input data shape:", train_X.shape, "observed training label data shape:", train_y.shape)
print("observed testing input data shape:", test_X.shape, "observed testing label data shape:", test_y.shape)
return train_X, test_X, train_y, test_y
# scale and format forecast data as train/test inputs/labels
def format_fcst_data(fcst_data, tide_fit, rain_fit, gwl_fit):
# separate forecast data into gwl, tide, and rain
fcst_scaled = pd.DataFrame(fcst_data["Datetime"])
for col in fcst_data.columns:
if col.split("(")[0] == "tide":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = tide_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "rain":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = rain_fit.transform(col_data)
fcst_scaled[col] = col_scaled
if col.split("(")[0] == "gwl":
col_data = np.asarray(fcst_data[col])
col_data = col_data.reshape(col_data.shape[0], 1)
col_scaled = gwl_fit.transform(col_data)
fcst_scaled[col] = col_scaled
# split fcst data into inputs and labels
fcst_values = fcst_scaled[fcst_scaled.columns[1:]].values
fcst_input, fcst_labels = fcst_values[:, :-18], fcst_values[:, -18:]
# reshape fcst input to be 3D [samples, timesteps, features]
fcst_test_X = fcst_input.reshape((fcst_input.shape[0], 1, fcst_input.shape[1]))
print("forecast input data shape:", fcst_test_X.shape, "forecast label data shape:", fcst_labels.shape)
return fcst_test_X, fcst_labels
# create df of full observed data and predictions and extract storm data
def full_pred_df(test_dates, storm_data, n_lags, n_ahead, inv_y, inv_yhat):
dates_t1 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 1:-n_ahead + 2])
dates_t1 = dates_t1.reset_index(inplace=False, drop=True)
dates_9 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 9:-n_ahead + 10])
dates_9 = dates_9.reset_index(inplace=False, drop=True)
dates_18 = pd.DataFrame(test_dates[["Datetime"]][n_lags + 18:])
dates_18 = dates_18.reset_index(inplace=False, drop=True)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["Obs. GWL t+1", "Pred. GWL t+1"])
df_t1 = pd.concat([df_t1, dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["Obs. GWL t+9", "Pred. GWL t+9"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = pd.DataFrame(df_t18, index=None, columns=["Obs. GWL t+18", "Pred. GWL t+18"])
df_t18 = pd.concat([df_t18, dates_18], axis=1)
df_t18 = df_t18.set_index("Datetime")
storm_dates_t1 = storm_data[['gwl(t+1)']]
storm_dates_t1.index = storm_dates_t1.index + pd.DateOffset(hours=1)
storm_dates_t9 = storm_data[['gwl(t+9)']]
storm_dates_t9.index = storm_dates_t9.index + pd.DateOffset(hours=9)
storm_dates_t18 = storm_data[['gwl(t+18)']]
storm_dates_t18.index = storm_dates_t18.index + pd.DateOffset(hours=18)
df_t1_storms = np.asarray(df_t1[df_t1.index.isin(storm_dates_t1.index)])
df_t9_storms = np.asarray(df_t9[df_t9.index.isin(storm_dates_t9.index)])
df_t18_storms = np.asarray(df_t18[df_t18.index.isin(storm_dates_t18.index)])
storms_list = [df_t1_storms, df_t9_storms, df_t18_storms]
return df_t1, df_t9, df_t18, storms_list
# create df of storm observed data and predictions
def storm_pred_df(storm_data, n_train, inv_y, inv_yhat):
test_dates_t1 = storm_data[['Datetime', 'tide(t+1)', 'rain(t+1)']].iloc[n_train:]
test_dates_t1 = test_dates_t1.reset_index(drop=True)
test_dates_t1['Datetime'] = pd.to_datetime(test_dates_t1['Datetime'])
test_dates_t1['Datetime'] = test_dates_t1['Datetime'] + pd.DateOffset(hours=1)
test_dates_t9 = storm_data[['Datetime', 'tide(t+9)', 'rain(t+9)']].iloc[n_train:]
test_dates_t9 = test_dates_t9.reset_index(drop=True)
test_dates_t9['Datetime'] = pd.to_datetime(test_dates_t9['Datetime'])
test_dates_t9['Datetime'] = test_dates_t9['Datetime'] + pd.DateOffset(hours=9)
test_dates_t18 = storm_data[['Datetime', 'tide(t+18)', 'rain(t+18)']].iloc[n_train:]
test_dates_t18 = test_dates_t18.reset_index(drop=True)
test_dates_t18['Datetime'] = pd.to_datetime(test_dates_t18['Datetime'])
test_dates_t18['Datetime'] = test_dates_t18['Datetime'] + pd.DateOffset(hours=18)
obs_t1 = np.reshape(inv_y[:, 0], (inv_y.shape[0], 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (inv_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["obs", "pred"])
df_t1 = pd.concat([df_t1, test_dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
df_t1 = df_t1.rename(columns={'obs': 'Obs. GWL t+1', 'pred': 'Pred. GWL t+1'})
obs_t9 = np.reshape(inv_y[:, 8], (inv_y.shape[0], 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (inv_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["obs", "pred"])
df_t9 = pd.concat([df_t9, test_dates_t9], axis=1)
df_t9 = df_t9.set_index("Datetime")
df_t9 = df_t9.rename(columns={'obs': 'Obs. GWL t+9', 'pred': 'Pred. GWL t+9'})
obs_t18 = np.reshape(inv_y[:, 17], (inv_y.shape[0], 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (inv_y.shape[0], 1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = pd.DataFrame(df_t18, index=None, columns=["obs", "pred"])
df_t18 = pd.concat([df_t18, test_dates_t18], axis=1)
df_t18 = df_t18.set_index("Datetime")
df_t18 = df_t18.rename(columns={'obs': 'Obs. GWL t+18', 'pred': 'Pred. GWL t+18'})
all_data_df = pd.concat([df_t1, df_t9, df_t18], axis=1)
return all_data_df
# create df of forecast data and predictions
def fcst_pred_df(fcst_data, inv_fcst_y, inv_fcst_yhat):
# combine forecast prediction data with observations
test_dates_t1 = fcst_data[['Datetime', 'tide(t+1)', 'rain(t+1)']]
test_dates_t1 = test_dates_t1.reset_index(drop=True)
test_dates_t1['Datetime'] = pd.to_datetime(test_dates_t1['Datetime'])
test_dates_t1['Datetime'] = test_dates_t1['Datetime'] + pd.DateOffset(hours=1)
test_dates_t9 = fcst_data[['Datetime', 'tide(t+9)', 'rain(t+9)']]
test_dates_t9 = test_dates_t9.reset_index(drop=True)
test_dates_t9['Datetime'] = pd.to_datetime(test_dates_t9['Datetime'])
test_dates_t9['Datetime'] = test_dates_t9['Datetime'] + pd.DateOffset(hours=9)
test_dates_t18 = fcst_data[['Datetime', 'tide(t+18)', 'rain(t+18)']]
test_dates_t18 = test_dates_t18.reset_index(drop=True)
test_dates_t18['Datetime'] = pd.to_datetime(test_dates_t18['Datetime'])
test_dates_t18['Datetime'] = test_dates_t18['Datetime'] + pd.DateOffset(hours=18)
obs_t1 = np.reshape(inv_fcst_y[:, 0], (inv_fcst_y.shape[0], 1))
pred_t1 = np.reshape(inv_fcst_yhat[:, 0], (inv_fcst_y.shape[0], 1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = pd.DataFrame(df_t1, index=None, columns=["Obs. GWL t+1", "Fcst. GWL t+1"])
df_t1 = pd.concat([df_t1, test_dates_t1], axis=1)
df_t1 = df_t1.set_index("Datetime")
obs_t9 = np.reshape(inv_fcst_y[:, 8], (inv_fcst_y.shape[0], 1))
pred_t9 = np.reshape(inv_fcst_yhat[:, 8], (inv_fcst_y.shape[0], 1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = pd.DataFrame(df_t9, index=None, columns=["Obs. GWL t+9", "Fcst. GWL t+9"])
# api: pandas.DataFrame
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import itertools
from matplotlib import dates
import itertools
import datetime
#-----------------------------------------------------------------------------
# Paths for saving -----------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf')
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##---------------------------------------- Method 1: top-of-atmosphere radiation -----------------------------------------##
##---SOLAR DECLINATION CALCULATION---##
J = np.arange(1, 366, 1)
g = 2*m.pi*(J-1)/365
d = (0.006918 - 0.399912*np.cos(g) + 0.070257*np.sin(g) - 0.006758*np.cos(2*g) + 0.000907*np.sin(2*g) - 0.002697*np.cos(3*g) + 0.00148*np.sin(3*g))  # solar declination in radians (Spencer series)
dd = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in d))
##---HOUR ANGLE CALCULATION---##
def daterange(start_date, end_date):
delta = timedelta(hours=1)
while start_date < end_date:
yield start_date
start_date += delta
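# Hedged usage sketch (added for illustration, not part of the original script):
# daterange yields hourly datetimes over [start_date, end_date); the helper name
# _demo_daterange is hypothetical.
def _demo_daterange():
    hours = list(daterange(datetime.datetime(2013, 1, 1, 0, 0),
                           datetime.datetime(2013, 1, 1, 3, 0)))
    return hours  # [00:00, 01:00, 02:00] on 2013-01-01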
##---Equation of time---##
B = 2*m.pi*(J-81)/365
ET = 9.87*np.sin(2*B) - 7.53*np.cos(B) - 1.5*np.sin(B)  # equation of time in minutes
##---Solar time---##
Ls = -75. # Standard meridian in decimal degrees
# Ls = -75*m.pi/180. # Standard meridian in radians
Ll = -75.56359 # Local meridian in decimal degrees
# Ll = -75.56359*m.pi/180. # Local meridian in radians
L = (Ls-Ll)/15
LT = [] # Local standard time as datetime
start_date = datetime.datetime(2013, 1, 1, 00, 00)
end_date = datetime.datetime(2013, 1, 1, 23, 00)
for single_date in daterange(start_date, end_date):
LT.append(single_date.time())
LT = np.arange(0, 24, 1) # Local standard time as decimal hours
TS = []
for j in range(len(ET)): # Loop over the solar time of each day
for i in range(len(LT)): # Loop over the hours
TS.append(LT[i]+(ET[j]/60)+L)
##---Hour angle---##
w = []
for i in range(len(TS)):
w.append(15*(12-TS[i])) # In decimal degrees
w = np.array(w)*m.pi/180 # Convert to radians
##---TOP-OF-ATMOSPHERE RADIATION CALCULATION---##
##---Eccentricity---##
Eo = 1+(0.0033*np.cos((2*np.pi*J)/365))
d_h = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in list(d))) # Declination at hourly resolution
Eo_h = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in Eo)) # Eccentricity at hourly resolution
##---Top-of-atmosphere radiation---##
Cte = ((12*3.6)/m.pi)*1367
Lat = 6.217 # In decimal degrees
Io = []
for i in range(len(w)-1):
Io.append(Cte*Eo_h[i]*(((m.sin(Lat*m.pi/180)*m.cos(d_h[i]))*(m.sin(w[i])-m.sin(w[i+1])))+(w[1]-w[2])*(m.sin(Lat*m.pi/180)*m.sin(d_h[i])))) ## NOTE: this iteration introduces a one-hour offset
Io = np.array(Io)
Io[Io < 0] = np.nan
##---------------------------------------- Method 2: top-of-atmosphere radiation -----------------------------------------##
Lat = 6.217*m.pi/180. # Radians
##---Earth-Sun distance for each day---##
dist = 1 - 0.01672*(np.cos(0.985*(J-4)))
dist = dist*10E10
distM = 1.5*10E11 # Check units (note that 10E11 evaluates to 1.0e12 in Python)
So = 1367 # W/m2
# The declination is already in radians, so d = d*np.pi/180 is not needed
##---Hour angle per day (sunrise)---##
ho = np.arccos(-1*(np.tan(Lat)*np.tan(d))) # In radians
ho = ho*180/np.pi
##---Sunrise time in hours from the hour angle---##
to = 12 - ho/15 # In decimal hours
to_m = np.mean(to)
time_o = []
for i in range(len(to)):
a = (str(datetime.timedelta(seconds = to[i]*3600))[0:7])
time_o.append(datetime.datetime.strptime(a, '%H:%M:%S').time())
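# Note (added for clarity): the pattern above converts decimal hours to a time,
# e.g. 6.25 h -> timedelta(seconds=6.25*3600) -> '6:15:00' -> datetime.time(6, 15).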
##---Hour angle per day (sunset)---##
hf = -1*ho # In degrees
##---Sunset time in hours from the hour angle---##
tf = 12 - hf/15 # In decimal hours
tf_m = np.mean(tf)
time_f = []
for i in range(len(tf)):
a = (str(datetime.timedelta(seconds = tf[i]*3600))[0:7])
time_f.append(datetime.datetime.strptime(a, '%H:%M:%S').time())
##---Average daily insolation at the top of the atmosphere---##
Qd = So/np.pi*((distM/dist)**2)*(ho*np.sin(Lat)*np.sin(d) + np.cos(Lat)*np.cos(d)*np.sin(ho))
##---Hourly top-of-atmosphere radiation---##
Io = []
#for i in range(len(d)):
# for j in range(0, len(w), 24):
# Io.append(So*Eo[i]*(np.sin(d[i])*np.sin(Lat) + np.cos(d[i])*np.cos(Lat)*np.cos(w[j:j+24])))
Io = []
for i in range(len(dd)):
Io.append(So*Eo_h[i]*(np.sin(dd[i])*np.sin(Lat) + np.cos(dd[i])*np.cos(Lat)*np.cos(w[i])))
##--------------------------------------------------------------------------------------------------------------------##
##---DAY LENGTH CALCULATION---##
Nd = 2./15.*ho # In decimal hours
Nd_m = np.mean(Nd)
time_Nd = []
for i in range(len(Nd)):
a = (str(datetime.timedelta(seconds = Nd[i]*3600))[0:7])
time_Nd.append(datetime.datetime.strptime(a, '%H:%M:%S').time())
##---SOLAR ALTITUDE (ALPHA) CALCULATION---##
Alpha = []
for i in range(len(d)):
for j in range(0, len(w), 24):
Alpha.append(np.arcsin(np.sin(Lat)*np.sin(d[i])+np.cos(Lat)*np.cos(d[i])*np.cos(w[j:j+24])))
##---INCIDENT RADIATION CALCULATION---##
df = pd.read_table('/home/nacorreasa/SIATA/Investigacion/Consultas6001.txt', parse_dates=[2])
df = df.set_index(["fecha_hora"])
df.index = df.index.tz_localize('UTC').tz_convert('America/Bogota')
df.index = df.index.tz_localize(None)
df_h = pd.DataFrame()
# api: pandas.DataFrame
"""Periodic pattern mining with a MDL criterion"""
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
from itertools import groupby
import numpy as np
import pandas as pd
from ..base import BaseMiner, DiscovererMixin, MDLOptimizer
from ..bitmaps import Bitmap
from ..utils import intersect2d, sliding_window_view
log = np.log2
INDEX_TYPES = (
pd.DatetimeIndex,
pd.RangeIndex,
pd.Int64Index,
)
def residual_length(S, n_event_tot, dS):
"""
compute L(o) = L(t) + L(a) for all (a, t) in S
i.e the length from a block of residual events
Parameters
----------
S: np.ndarray of shape or scalar
array containing indices for events to consider
n_event_tot: int
number of events in the original events
dS: int
max - min from original events
"""
card = S.shape[0] if isinstance(S, np.ndarray) else 1
return log(dS + 1) - log(card / float(n_event_tot))
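# Hedged usage sketch (added for illustration, not part of the original module):
# residual code length for a toy block of occurrences; the helper name and the
# totals below are hypothetical.
def _demo_residual_length():
    S = np.arange(5)            # indices of 5 residual occurrences
    n_event_tot, dS = 100, 500  # hypothetical totals for the full event log
    return residual_length(S, n_event_tot, dS)  # log2(dS + 1) - log2(5 / 100)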
def cycle_length(S, inter, n_event_tot, dS):
"""
Parameters
----------
S : np.array of type int64
a collection of cycles, all having the same length : r
The width of S is then r
inter: np.array of type int64
a collection of inter occurences, all having the same length: r - 1
n_event_tot: int
number of events in the original events
dS: int
max - min from original events
Returns
-------
tuple:
lengths for (a, r, p, tau, E)
"""
r = S.shape[1]
assert inter.shape[1] == r - 1 # check inter occurences compliant with events
p = np.median(inter, axis=1)
E = inter - p.reshape((-1, 1))
dE = E.sum(axis=1)
S_size = len(S) + r - 1
L_a = -log(S_size / n_event_tot) # FIXME
L_r = log(S_size)
L_p = log(np.floor((dS - dE) / (r - 1)))
L_tau = log(dS - dE - (r - 1) * p + 1)
L_E = 2 * E.shape[1] + np.abs(E).sum(axis=1)
return L_a, L_r, L_p, L_tau, L_E
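# Hedged usage sketch (added for illustration, not part of the original module):
# one candidate cycle of length r=3 with occurrences [10, 30, 52]; the
# inter-occurrence gaps are [20, 22], so the period p is their median (21) and
# E holds the signed corrections [-1, 1]. Totals below are hypothetical.
def _demo_cycle_length():
    S = np.array([[10, 30, 52]])
    inter = np.diff(S, axis=1)  # shape (1, r - 1)
    return cycle_length(S, inter, n_event_tot=50, dS=100)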
def compute_cycles_dyn(S, n_tot, max_length=100):
"""
Parameters
----------
S: pd.Index or np.array
a Series of occurences
n_tot: int
total number of occurences in the original events
"""
_, cut_points = get_table_dyn(S, n_tot, max_length)
splits = _recover_splits_rec(cut_points, 0, len(S) - 1)
cycles = list()
covered = Bitmap()
for length, g in groupby(splits, key=lambda e: e[1] - e[0]):
g = list(g)
if length >= 2: # eq to length + 1 >= 3
curr_cycles = np.vstack([S[s : e + 1] for s, e in g]) # COPY
cycles.append(curr_cycles)
for s, e in g:
covered.update(range(s, e + 1))
return list(reversed(cycles)), covered
def get_table_dyn(S: pd.Index, n_tot: int, max_length=100):
"""
Parameters
----------
S: pd.Index or np.ndarray
a Series of occurences
n_tot: int
total number of occurences in the original events
max_length: int, default=100
maximum number of occurrences for a cycle to cover
"""
diffs = np.diff(S)
triples = sliding_window_view(S, 3)
diff_pairs = sliding_window_view(diffs, 2)
dS = S.max() - S.min()
score_one = residual_length(1, n_tot, dS)
scores = sum(cycle_length(triples, diff_pairs, len(S), dS))
change = scores > 3 * score_one
scores[change] = 3 * score_one # inplace replacement
cut_points = np.array([-1] * len(scores), dtype=object)
cut_points[~change] = None
scores = dict(zip(((i, i + 2) for i in range(len(scores))), scores))
cut_points = dict(zip(scores.keys(), cut_points))
max_length = min([len(S), max_length])
for k in range(4, max_length + 1):
w = sliding_window_view(S, k)
_diffs = sliding_window_view(diffs, k - 1)
_s = sum(cycle_length(w, _diffs, len(S), dS))
for ia, best_score in enumerate(_s):
cut_point = None
iz = ia + k - 1
for im in range(ia, iz):
if im - ia + 1 < 3:
score_left = score_one * (im - ia + 1)
else:
score_left = scores[(ia, im)]
if iz - im < 3:
score_right = score_one * (iz - im)
else:
score_right = scores[(im + 1, iz)]
if score_left + score_right < best_score:
best_score = score_left + score_right
cut_point = im
scores[(ia, iz)] = best_score
cut_points[(ia, iz)] = cut_point
return scores, cut_points
def extract_triples(S, dS):
"""
Extract cycles of length 3 given a list of occurences S
Parameters
----------
S: pd.Index
input occurences
dS
difference between max event and min event, from original Series
"""
l_max = log(dS + 1) - 2
triples = list()
for idx, occ in enumerate(S[1:-1], 1):
righties = S[idx + 1 :]
lefties = S[:idx]
righties_diffs = righties - occ
lefties_diffs = lefties - occ
grid = np.array(np.meshgrid(lefties_diffs, righties_diffs)).T.reshape(-1, 2)
# keep = (np.abs(grid[:, 1]) - np.abs(grid[:, 0])) <= l
keep = np.abs(grid[:, 0] - grid[:, 1]) < l_max
t = occ + grid[keep]
if t.size != 0:
e = np.array([t[:, 0], np.array([occ] * t.shape[0]), t[:, 1]]).T
assert np.issubdtype(e.dtype, np.number) and e.shape[1] == 3
triples.append(e)
if triples:
return np.vstack(triples)
return None
def merge_triples(triples, n_merge=10):
"""
Parameters
----------
triples: ndarray
cycles of size 3 (i.e triples.shape[1] == 3)
n_merge: int
maximum number of merge operation to perform
Returns
-------
list[np.ndarray]
a list of cycles
"""
if triples is None:
return list()
res = [triples]
for idx in range(1, n_merge + 1):
prev = res[idx - 1]
lefties = prev[:, :2]
righties = prev[:, -2:]
_, left_idx, right_idx = intersect2d(lefties, righties, return_indices=True)
if left_idx.size == 0 or right_idx.size == 0:
break
merged = np.hstack([prev[right_idx], prev[left_idx, 2:]])
res.append(merged)
to_delete = np.union1d(left_idx, right_idx)
res[idx - 1] = np.delete(res[idx - 1], to_delete, axis=0)
if (np.unique(merged) == np.unique(merged)).all():
res.pop(idx - 1)
break
return list(reversed(res)) # inverse order of length
def _recover_splits_rec(cut_points, ia, iz):
if (ia, iz) in cut_points:
if cut_points[(ia, iz)] is None:
return [(ia, iz)]
im = cut_points[(ia, iz)]
if im >= 0:
return _recover_splits_rec(cut_points, ia, im) + _recover_splits_rec(
cut_points, im + 1, iz
)
return []
def _remove_zeros(numbers: pd.Series):
n = 0
while (numbers % 10 == 0).all():
numbers //= 10
n += 1
return numbers, n
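# Hedged usage sketch (added for illustration, not part of the original module):
# trailing zeros shared by every value are factored out, e.g.
# [100, 2000, 4000] -> ([1, 20, 40], 2). The helper name is hypothetical.
def _demo_remove_zeros():
    return _remove_zeros(pd.Series([100, 2000, 4000]))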
def _reconstruct(start, period, dE):
"""
Reconstruct occurences,
starting from `start`, and
correcting `period` with a delta for all deltas in `dE`,
`len(dE)` occurences are reconstructed
Parameters
----------
start: int or datetime
starting point for the event
period: int or timedelta
period between two occurences
dE: np.array of [int|timedelta]
inter-occurrence deltas
"""
occurences = [start]
current = start
for d_e in dE:
e = current + period + d_e
occurences.append(e)
current = e
return occurences
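# Hedged worked example (added for illustration, not part of the original module):
# starting at 10 with period 20 and corrections dE = [0, 2, -1], the
# reconstructed occurrences are 10, 10+20+0=30, 30+20+2=52, 52+20-1=71.
def _demo_reconstruct():
    return _reconstruct(10, 20, [0, 2, -1])  # [10, 30, 52, 71]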
def _generate_candidates(S_a: pd.Index, n_event_tot: int, max_length: int = 100):
if len(S_a) < 3:
return list()
S_a = S_a.sort_values()
dS = S_a[-1] - S_a[0]
cycles, covered = compute_cycles_dyn(S_a, n_event_tot, max_length)
covered = Bitmap(covered)
if len(S_a) - len(covered) > 3: # add triples if necessary
_all = Bitmap(range(len(S_a)))
_S_a = S_a[_all - covered]
triples = extract_triples(_S_a, dS)
merged = merge_triples(triples)
cycles.extend(merged)
return list(sorted(cycles, key=lambda _: _.shape[1], reverse=True))
def evaluate(S, cands):
"""
Evaluate candidates `cands`, given S
Unlike the original implementation by Galbrun & Al.,
if an occurrence is present in more than one candidate cycle, we keep the cycle
with the greatest length.
Parameters
----------
S: pd.Index
Series of occurences for a specific event
cands: list[np.ndarray]
A list of candidate batches. Each batch contains candidates occurences of the same length,
hence stored in a common `numpy.ndarray`.
Batches are sorted by decreasing order of width,
so that we consider larger candidates first.
"""
res = list() # list of pandas DataFrame
covered = list()
for cand_batch in cands:
seen_occs = np.isin(cand_batch, covered).any(axis=1)
cand_batch = cand_batch[~seen_occs] # larger candidates already seen
length = cand_batch.shape[1]
E = np.diff(cand_batch, axis=1)
period = np.floor(np.median(E, axis=1)).astype("int64")
dE = (E.T - period).T
df = pd.DataFrame(
dict(start=cand_batch[:, 0], length=length, period=period, dE=dE.tolist())
)
res.append(df)
covered.extend(np.unique(cand_batch))
res = pd.concat(res, ignore_index=True)
residual_pos = Bitmap(range(len(S))) - Bitmap(np.searchsorted(S, sorted(covered)))
return res, S[residual_pos]
class PeriodicCycleMiner(BaseMiner, MDLOptimizer, DiscovererMixin):
"""
Mining periodic cycles with a MDL Criterion
PeriodicCycleMiner is an approach to mine periodic cycles from event logs
while relying on a Minimum Description Length (MDL) criterion to evaluate
candidate cycles. The goal here is to extract a set of cycles that characterizes
the periodic structure present in the data
A cycle is defined as a 5-tuple of the form
.. math:: \\alpha, r, p, \\tau, E
Where
- :math:`\\alpha` is the `repeating event`
- :math:`r` is the number of repetitions of the event, called the `cycle length`
- :math:`p` is the inter-occurrence distance, called the `cycle period`
- :math:`\\tau` is the index of the first occurrence, called the `cycle starting point`
- :math:`E` is a list of :math:`r - 1` signed integer offsets, i.e `cycle shift corrections`
Parameters
----------
max_length: int, default=100
maximum length for a candidate cycle, when running the dynamic programming heuristic
n_jobs : int, default=1
The number of jobs to use for the computation. Each single event is attributed a job
to discover potential cycles.
Threads are preferred over processes.
Examples
--------
>>> from skmine.periodic import PeriodicCycleMiner
>>> S = pd.Series("ring_a_bell", [10, 20, 32, 40, 60, 79, 100, 240])
>>> pcm = PeriodicCycleMiner().fit(S)
>>> pcm.discover()
start length period
ring_a_bell 0 40 4 20
1 10 3 11
References
----------
.. [1]
Galbrun, E & Cellier, P & Tatti, N & Termier, A & Crémilleux, B
"Mining Periodic Pattern with a MDL Criterion"
"""
def __init__(self, *, max_length=100, n_jobs=1):
self.cycles_ = pd.DataFrame()
self.residuals_ = dict()
self.is_datetime_ = None
self.n_zeros_ = 0
self.is_fitted = (
lambda: self.is_datetime_ is not None
) # TODO : this make pickle broken
self.n_jobs = n_jobs
self.max_length = max_length
def fit(self, S):
"""fit PeriodicCycleMiner on data logs
This generate new candidate cycles and evaluate them.
Residual occurences are stored as an internal attribute,
for later reconstruction (MDL is lossless)
Parameters
-------
S: pd.Series
logs, represented as a pandas Series
This pandas Series must have an index of type in
(pd.DatetimeIndex, pd.RangeIndex, pd.Int64Index)
"""
if not isinstance(S, pd.Series):
raise TypeError("S must be a pandas Series")
if not isinstance(S.index, INDEX_TYPES):
raise TypeError(f"S must have an index with a type amongst {INDEX_TYPES}")
self.is_datetime_ = isinstance(S.index, pd.DatetimeIndex)
if S.index.duplicated().any():
warnings.warn("found duplicates in S, removing them")
S = S.groupby(S.index).first()
S = S.copy()
S.index, self.n_zeros_ = _remove_zeros(S.index.astype("int64"))
candidates = self.generate_candidates(S)
gr = S.groupby(S.values).groups
cycles, residuals = zip(
*(evaluate(gr[event], cands) for event, cands in candidates.items())
)
c = dict(zip(candidates.keys(), cycles))
cycles = pd.concat(c.values(), keys=c.keys())
residuals = dict(zip(candidates.keys(), residuals))
residuals = {**gr, **residuals} # fill groups with no cands with all occurences
self.cycles_, self.residuals_ = cycles, residuals
return self
evaluate = evaluate
def generate_candidates(self, S):
"""
Parameters
----------
S: pd.Index or numpy.ndarray
Series of occurences for a specific event
Returns
-------
dict[object, list[np.ndarray]]
A dict, where each key is an event and each value a list of batch of candidates.
Batches are sorted in inverse order of width,
so that we consider larger candidate cycles first.
"""
n_event_tot = S.shape[0]
alpha_groups = S.groupby(S.values)
candidates = dict()
for event, S_a in alpha_groups:
cands = _generate_candidates(S_a.index, n_event_tot, self.max_length)
if cands:
candidates[event] = cands
return candidates
def discover(self):
"""Return cycles as a pandas DataFrame, with 3 columns,
with a 2-level multi-index: the first level mapping events,
and the second level being positional
Returns
-------
pd.DataFrame
DataFrame with the following columns
========== =================================
start when the cycle starts
length number of occurences in the event
period inter-occurence delay
========== =================================
Example
-------
>>> from skmine.periodic import PeriodicCycleMiner
>>> S = pd.Series("ring", [10, 20, 32, 40, 60, 79, 100, 240])
>>> pcm = PeriodicCycleMiner().fit(S)
>>> pcm.discover()
start length period
ring 0 40 4 20
1 10 3 11
"""
if not self.is_fitted():
raise Exception(f"{type(self)} instance if not fitted")
cycles = self.cycles_[["start", "length", "period"]].copy()
cycles.loc[:, ["start", "period"]] = cycles[["start", "period"]] * (
10 ** self.n_zeros_
)
if self.is_datetime_:
cycles.loc[:, "start"] = cycles.start.astype("datetime64[ns]")
cycles.loc[:, "period"] = cycles.period.astype("timedelta64[ns]")
return cycles
def reconstruct(self):
"""Reconstruct the original occurences from the current cycles.
Residuals will also be included, as the compression scheme is lossless
Denoting as :math:`\sigma(E)` the sum of the shift corrections for a cycle
:math:`C`, we have
.. math::
\Delta(C)=(r-1) p+\sigma(E)
Returns
-------
pd.Series
The reconstructed dataset
Notes
-----
The index of the resulting pd.Series will not be sorted
"""
cycles = self.cycles_[["start", "period", "dE"]]
result = list()
cycles_groups = cycles.groupby(level=0)
for alpha, df in cycles_groups:
l = list()
for start, period, dE in df.values:
occurences = _reconstruct(start, period, dE)
l.extend(occurences)
residuals = pd.Series(alpha, index=self.residuals_.get(alpha, list()))
S = pd.concat([residuals, pd.Series(alpha, index=l)])
# S.index = S.index.sort_values()
result.append(S)
for event in (
self.residuals_.keys() - cycles_groups.groups.keys()
): # add unfrequent events
result.append(pd.Series(event, index=self.residuals_[event]))
# api: pandas.Series
#!/usr/bin/env python3
# Primary Developer: <NAME>
# Additional Developers: Dr. <NAME>, Dr. <NAME>, <NAME>, <NAME>
import argparse
import pandas as pd
import os
import sys
import scipy
from gtfparse import read_gtf
from subprocess import call
from scipy import stats
# shell=True is so you can handle redirects
call("echo 'Running'", shell=True)
# Argument parser to facilitate calling from the command line
parser = argparse.ArgumentParser(description='Check purity of multiple myeloma tumor samples.')
# Add input argument for BAM file from patient/sample. This is required for the program.
parser.add_argument('-i', '--input_bam',
required=True,
help='BAM file for tumor sample')
# Add input argument for GTF file containing regions to isolate.
parser.add_argument('-g', '--input_gtf',
help='GTF to be used in processing')
parser.add_argument('-t', '--threads',
default=1,
type=int,
help='Number of threads to use. User must make threads available')
# Add input argument for GTF file containing regions to isolate.
parser.add_argument('-f', '--reference_fasta',
default=None,
help='Reference genome fasta to be used in processing')
# Add input argument for output path. If no argument or only a -o is provided, program defaults
# to current working directory.
parser.add_argument('-o', '--output_path',
nargs='?',
const=str(os.getcwd()),
default=str(os.getcwd()),
help='Output path to write files. Defaults to current working directory')
# Add input argument for option to build files. If not provided, program defaults to the user specified GTF, or
# to the default GTF if no -g input is provided. Stored as true for use in decision tree later on.
parser.add_argument('-b', '--build_files',
action='store_true',
help='Include -b if you would like to build files, otherwise typing -b is unnecessary')
# Add input argument for option to keep temporary files. Stored as true for use in decision tree later on.
parser.add_argument('-k', '--keep_temp',
action='store_true',
help='Include -k if you would like to keep the temporary files. Ignore to remove temporary files '
'once the program is finished')
# Add input argument for sample name. If none is provided, the name of the BAM file will be used.
parser.add_argument('-n', '--sample_name',
help='Desired name for the sample and associated files. Defaults to the name of the BAM file')
# Add input argument for resource directory, the directory to pull files used to build the GTF and interpret the
# featureCounts output. Also defaults to current working directory if not specified.
parser.add_argument('-d', '--resource_directory',
nargs='?',
const=str(os.getcwd()),
default=str(os.getcwd()),
help='Include -d /path/to/resource/files to specify a directory to pull resource files from.'
'Defaults to current directory.')
parser.add_argument('-build_only', '--build_only',
action='store_true',
help='Invoke -build_only to stop the program after the new GTF is built.')
# Generate accessible arguments by calling parse_args
args = parser.parse_args()
# Rename each input to something shorter and more intuitive for later use in the code.
out_path = args.output_path
in_gtf = args.input_gtf
input_aln = args.input_bam
build = args.build_files
threads = args.threads
keep_temp = args.keep_temp
samplename = args.sample_name
resource_directory = args.resource_directory
ref_fasta = args.reference_fasta
build_only = args.build_only
# This statement sets the sample name to the name of the BAM if no name is provided, using os.basename to extract the
# file name from the input path and os.splitext to split the name into ('filename', 'extension'),
# e.g. ('example', '.txt'). The [0] accesses the first string in that output (the file name w/o extension).
if samplename is None:
samplename = os.path.splitext(os.path.basename(input_aln))[0]
# ----------------------------------------- #
# DEFAULTS
# ----------------------------------------- #
# This section reads in several user-definable defaults for the program. They are the name of the default GTF,
# chromosomes to search, IG components to consider, and path to featureCounts respectively. They can be changed by
# editing the USER_DEFAULTS.txt file in the resource directory.
DEFAULT_FILE = open(r'%s/USER_DEFAULTS.txt' % resource_directory, 'r')
default_parameters = DEFAULT_FILE.read().splitlines()
default_gtf = default_parameters[2]
default_chromosome_list = default_parameters[4].split()
default_component_list = default_parameters[6].split()
featurecounts_path = default_parameters[8]
# ------------------------------------------------------------------------------------------------------------------- #
# FUNCTIONS THAT SUPPORT CODE AT BOTTOM
# ------------------------------------------------------------------------------------------------------------------- #
def read_aln_file(filename, threads, out_path, reference_genome_fasta=None):
"""
read the alignment file whether it is a SAM, BAM or CRAM file and returns the bam file handle
:return: aln read file handle (bamh or alnh)
"""
extension = os.path.splitext(filename)[1]
basepath = os.path.splitext(filename)[0]
basename = os.path.splitext(os.path.basename(filename))[0]
try:
if extension == ".cram":
if reference_genome_fasta is None:
raise FileNotFoundError(
"ERROR: reading CRAM file requires a Reference Genome Fasta File To be Provided with its FAI index.")
print('Conversion to BAM required: running samtools')
call("samtools view --threads %s -bh %s -o %s/%s.bam -T %s" % (threads, filename, out_path, basename,
reference_genome_fasta), shell=True)
print('Indexing new BAM file')
call("samtools index -@ %s -b %s/%s.bam" % (threads, out_path, basename), shell=True)
print('Conversion successful')
return '%s/%s.bam' % (out_path, basename)
elif extension == ".bam":
return filename
elif extension == ".sam":
return filename
else:
print('ERROR: HALTING PROGRAM AND RETURNING VARIABLES FOR DEBUGGING')
print('FILENAME:' + filename)
print('REFERENCE FASTA:' + str(reference_genome_fasta))
print('THREADS:' + str(threads))
print('EXTENSION:' + extension)
print('BASE PATH:' + basepath)
print('BASE NAME:' + basename)
sys.exit("EXPECTED EXTENSION for ALIGNMENT FILE NOT FOUND; must be either .cram, .bam or .sam")
except FileNotFoundError as fnf:
sys.exit(fnf)
except Exception as e:
sys.exit(e)
def isolate_ig(dataframe, contaminant_list, loci, chromosome_list=default_chromosome_list,
component_list=default_component_list):
"""
This function takes an input dataframe (typically one generated by using the read_gtf function on a GTF of a
reference organism's genome) and isolates all genes known to encode for IGs and contaminants based on the
desired immunoglobulin components and chromosomes specified by the user. It excludes pseudogenes
and only includes exons.
:param dataframe: An input dataframe of an organism's genome.
:param chromosome_list: An input list of chromosomes on which immunoglobulin loci are expected to be found.
Typically 2, 14, and 22 in humans, but may vary between organisms. List can be expanded or condensed at
the discretion of the user.
:param contaminant_list: An input list of known contaminant genes to be incorporated into the output dataframe.
:param loci: An input dataframe of the overarching immunoglobulin loci on chromosomes 2, 14, and 22
:param component_list: An input list of immunoglobulin components to search for (e.g. variable, constant, joining).
The values in the list must match at least part of the string encoded under the "gene_biotype" column in the dataframe.
For example, in the UCSC human genome GTF, IG Constant regions are listed as "IG_C_gene." If the user wanted to
isolate constant regions only, it would be sufficient to pass the input as follows: component_list = ['IG_C'].
Usage example: Suppose we want to isolate the variable and constant and joining IG regions from chromosomes
2, 14, and 22 in the human genome. If our full genome dataframe is named "whole_genome", then we should
call the function as follows:
# Set up inputs
desired_regions = ['IG_C', 'IG_V', 'IG_J']
chromosomes = ['2', '14', '22']
contaminant_list = ['GeneA', 'GeneB', etc] (this is provided in resource files)
loci = read_gtf('IG_Loci.gtf') (this is provided in resource files)
# Call function
isolate_ig(whole_genome, contaminant_list, loci, chromosomes, desired_regions)
Defaults: If no component list or chromosome list is provided, the function defaults to chromosomes 2, 14, and 22,
and searches for IG_C and IG_V regions. Other inputs must be provided.
:return: ig_dataframe: A smaller dataframe containing only desired IG regions and contaminants.
"""
# Convert dataframe to string format for ease of processing
dataframe = dataframe.applymap(str)
# Create a copy of the dataframe to search against the contaminant list.
contaminant_dataframe = dataframe.applymap(str)
# Isolate rows where the "gene_biotype" column starts with "IG_" and "feature" starts with "exon"
dataframe = dataframe[dataframe['gene_biotype'].str.match('IG_') & dataframe['feature'].str.match('exon')]
# Isolate all rows where "gene_biotype" DOES NOT contain "pseudogene"
dataframe = dataframe[~dataframe.gene_biotype.str.contains("pseudogene")]
# Isolate all rows where the gene biotype matches the desired regions (IG_C, IG_V, etc)
dataframe = dataframe.loc[dataframe['gene_biotype'].str.contains('|'.join(component_list))]
# Isolate all rows where the chromosome position is 2, 14, or 22
ig_dataframe = dataframe.loc[dataframe['seqname'].str.contains('|'.join(chromosome_list))]
# Isolate all rows of the dataframe where column 'gene_name' contains an element of contaminant_list
# (a list of the gene names of known contaminants)
contaminant_dataframe = contaminant_dataframe[contaminant_dataframe['gene_name'].isin(contaminant_list)]
# Append the contaminant and loci dataframes to the IG dataframe to create one dataframe
ig_dataframe = ig_dataframe.append(contaminant_dataframe).reset_index(drop=True)
ig_dataframe = ig_dataframe.append(loci).reset_index(drop=True)
# Most GTFs contain paralogs of certain IGs, designated with a 'D', for example, IGK1-23 is a paralog of IGK-23D.
# These genes are similar enough that for our purposes they should be considered the same gene. This code isolates
# all instances of D-designated paralogs, replaces their name with the non-D equivalent, and drops duplicates.
# Isolate all D-designated paralogs into ig_dataframe2
ig_dataframe2 = ig_dataframe[
ig_dataframe['gene_name'].str.match('IGK') & ig_dataframe['gene_name'].str.contains('D') | ig_dataframe[
'gene_name'].str.match('IGHV') & ig_dataframe['gene_name'].str.contains('D')]
# Remove D from the gene names of paralogs. NOTE: This does remove all instances of 'D' in the gene name, but
# it was confirmed that no instances of D besides paralog designators exist in IG gene names in the human GRCh38 GTF
ig_dataframe2['gene_name'] = ig_dataframe2['gene_name'].str.replace('D', '')
# Drop copies from existing dataframe and append ig_dataframe2
ig_dataframe = ig_dataframe.drop(
ig_dataframe[(ig_dataframe.gene_name.str.contains('D')) & (ig_dataframe.gene_name.str.match('IGK'))].index)
ig_dataframe = ig_dataframe.drop(
ig_dataframe[(ig_dataframe.gene_name.str.contains('D')) & (ig_dataframe.gene_name.str.match('IGHV'))].index)
ig_dataframe = ig_dataframe.append(ig_dataframe2).reset_index(drop=True)
return ig_dataframe
def interpret_featurecounts(filepath, resource_directory, samplename):
"""
This function takes the output from featureCounts's operation on the input BAM and GTF files and creates
several files, title.txt, Graph_IgH and Graph_IgL, which R uses to plot, as well as
samplenamepurityCheckerresults.txt, a file containing the data for each sample in an easily-accessible text format.
Unlike isolate_ig, this function has little utility on its own, so no usage example is provided. It is recommended
that the user follow the format used later in the script, and not attempt to excise this function for separate use.
It is unlikely to work independently without extensive re-writes.
:param filepath: The directory in which the files built here will be deposited
:param resource_directory: The directory from which the files used here will be sourced.
:param samplename: The name of the sample.
:return: No return, but several files will be written by the function.
"""
# Create dataframe called "reads" by importing the output from featurecounts. First row is skipped in the import
# since it is just a header, second row is used to generate column labels. Tab-separated and new-line-terminated
# are specified to ensure a proper read (the output dataframe will be one column or row if not specified)
# filepath is used here instead of resource directory because featurecounts will write its output to the
# directory specified by filepath
reads = pd.read_csv(r'%s/%s.txt' % (filepath, samplename), sep='\t', lineterminator='\n',
skiprows=(0), header=(1))
# Rename the column containing the counts to "Count". For whatever reason it comes labeled with the input file path.
reads.rename(columns={reads.columns[6]: "Count"}, inplace=True)
# Read in featurecounts's summary file
summary = pd.read_csv(r'%s/%s.txt.summary' % (filepath, samplename), sep='\t',
lineterminator='\n', skiprows=(0), header=(0))
# Rename the Count column, since it is given a long and unwieldy name by default.
summary.rename(columns={summary.columns[1]: "Count"}, inplace=True)
# Create a new dataframe, "Condensed", containing data for Assigned reads and ignoring Unassigned reads EXCEPT
# those unassigned because they did not map to anything included in the GTF. These represent Ig genes and Non-Ig
# genes respectively, while the excluded data represents reads that map to more than one location, reads of too
# poor quality to map, and other "noisy" data that should be excluded.
condensed = summary[
summary['Status'].str.contains('Assigned') | summary['Status'].str.contains('Unassigned_NoFeatures')]
# Sum the "count" column of the condensed dataframe to generate the total number of verified reads (needed
# to calculate RPKM).
Featurecount_Total = condensed['Count'].sum()
# Read in all text files containing the names of genes in specific loci (or known to be contaminants) as lists.
Contaminant_List = open(r'%s/Non_Bcell_Contamination_GeneList_e98.txt' % resource_directory, 'r').read().split('\n')
IGH_Variable_List = open(r'%s/IgH_Variable_Genes.txt' % resource_directory, 'r').read().split('\n')
IGH_Constant_List = open(r'%s/IgH_Constant_Genes.txt' % resource_directory, 'r').read().split('\n')
IGK_Variable_List = open(r'%s/IgK_Variable_Genes.txt' % resource_directory, 'r').read().split('\n')
IGK_Constant_List = open(r'%s/IgK_Constant_Genes.txt' % resource_directory, 'r').read().split('\n')
IGL_Variable_List = open(r'%s/IgL_Variable_Genes.txt' % resource_directory, 'r').read().split('\n')
IGL_Constant_List = open(r'%s/IgL_Constant_Genes.txt' % resource_directory, 'r').read().split('\n')
# Equivalent to featurecounts_counts post-processing
# Drop the chromosome, start, end, and strand columns of the "reads" dataframe. Generate two new columns,
# "Reads per base pair" (gene reads divided by gene length), and "RPKM" (reads per kilobase million), a measurement
# equivalent to [(1,000,000)*(Number of Reads for a Gene)]/[(Gene Length in kb)*(Total Reads for the Sample)].
# A multiplicative factor of one billion is used here instead of one million because featurecounts returns gene
# length in base pairs, not kilobase pairs, and therefore the extra factor of one thousand is necessary.
reads = pd.concat([reads['Geneid'], reads['Count'], reads['Length'], reads['Count'] / reads['Length'],
1000000000 * reads['Count'] / reads['Length'] / Featurecount_Total], axis=1)
# Rename the new columns to "reads per bp" and "RPKM". When they are generated,
# pandas labels them with numbers automatically.
reads.rename(columns={reads.columns[3]: 'Reads per bp', reads.columns[4]: 'RPKM'}, inplace=True)
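# Worked example (added for clarity, hypothetical numbers): a gene with 200 reads
# and length 2,000 bp in a sample with 10,000,000 assigned reads gives
# RPKM = 1e9 * 200 / (2000 * 1e7) = 10.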
# Calculate the geometric mean of the RPKM column for all identified genes known to be contaminants.
# The geometric mean is the n-th **root** of the **product** of n elements, as opposed to the more common
# arithmetic mean, the **sum** of n elements divided by n.
# Take the featurecounts output and isolate a dataframe containing only genes known to be
# in the list of contaminants.
geomeandf = reads[reads['Geneid'].isin(Contaminant_List)]
# Add one to each element of the RPKM column. This is to avoid a multiply-by-zero situation when calculating
# the geomean. At any instance of 0, the mean is instead multiplied by one, yielding the same result.
# We assume adding 1 universally scales the geometric mean equivalently for all measurements.
geomeandf['RPKM'] = geomeandf['RPKM'] + 1
# Call scipy's gmean function on the RPKM column of the geomean dataframe to get the geometric mean.
# It is rounded to two decimal places here for readability, but the user should feel free to choose any number.
geomean = scipy.stats.gmean(geomeandf.loc[:, 'RPKM'], axis=0).round(2)
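# Worked example (added for clarity): the geometric mean of [1, 4, 16] is
# (1 * 4 * 16) ** (1/3) = 4, whereas the arithmetic mean is 7.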
# Equivalent to IGH_Variable_Counts, etc. in bash script
# Create dataframes of genes specific to each locus by returning the subset of the complete dataframe where the gene
# name (in column Geneid) is in one of the lists imported above.
# The reset_index method is used again here since a column is added later, and if the indices are not consistent
# between the two, the column will not be properly appended to the dataframe.
IGHVdf = reads[reads['Geneid'].isin(IGH_Variable_List)].reset_index()
IGHCdf = reads[reads['Geneid'].isin(IGH_Constant_List)].reset_index()
IGKVdf = reads[reads['Geneid'].isin(IGK_Variable_List)].reset_index()
IGKCdf = reads[reads['Geneid'].isin(IGK_Constant_List)].reset_index()
IGLVdf = reads[reads['Geneid'].isin(IGL_Variable_List)].reset_index()
IGLCdf = reads[reads['Geneid'].isin(IGL_Constant_List)].reset_index()
Contaminantdf = reads[reads['Geneid'].isin(Contaminant_List)].reset_index()
# Calculate the number of reads for each subset of genes by summing the
# "Count" column of their respective dataframes.
Total_IGHC_Reads = IGHCdf['Count'].sum()
Total_IGHV_Reads = IGHVdf['Count'].sum()
Total_IGKC_Reads = IGKCdf['Count'].sum()
Total_IGKV_Reads = IGKVdf['Count'].sum()
Total_IGLC_Reads = IGLCdf['Count'].sum()
Total_IGLV_Reads = IGLVdf['Count'].sum()
# Calculate the number of reads for the entire Heavy, Lambda, and Kappa loci. Since this information is included in
# a single cell, and need not be generated by summing the counts of a subset of the data, the .str.contains and .at
# methods are used, in place of the .isin and .sum methods used above. The reset_index method is necessary because
# while there is only one row of data for each of these loci, its index may be arbitrary, and will be kept by pandas
# by default, confusing the .at method. Forcing the index to 0 with the reset ensures that .at[0,'Count'] will work.
Total_IGH = reads[reads['Geneid'].str.contains('HEAVY_Locus')].reset_index().at[0, 'Count']
Total_IGK = reads[reads['Geneid'].str.contains('KAPPA_Locus')].reset_index().at[0, 'Count']
Total_IGL = reads[reads['Geneid'].str.contains('LAMBDA_Locus')].reset_index().at[0, 'Count']
# Generate several metrics used in later calculations using simple arithmetic on variables already produced.
Total_IG = Total_IGH + Total_IGK + Total_IGL
Percent_IG = Total_IG / Featurecount_Total
Total_Light_Chain = Total_IGK + Total_IGL
Total_Light_Variable = Total_IGKV_Reads + Total_IGLV_Reads
Total_Light_Constant = Total_IGKC_Reads + Total_IGLC_Reads
Percent_Kappa = Total_IGK / Total_Light_Chain
Percent_Lambda = Total_IGL / Total_Light_Chain
# About here, we officially transition to building the files that the R script wants
# This function takes an IG subtype dataframe and returns a corresponding dataframe containing additional
# information, such as frequency, which is used in R processing and retained as part of sample data
def generate_calc_table(dataframe, group_total, label):
labelseries = pd.Series([label] * len(dataframe.index))
dataframe_Calc = pd.concat([dataframe['Geneid'], dataframe['Count'], dataframe['Count'] / group_total,
dataframe['Count'] / Featurecount_Total, labelseries, dataframe['Length']], axis=1)
dataframe_Calc.columns = ['Geneid', 'Count', 'List_Percent', 'Total_Percent', 'Subtype', 'Length']
return dataframe_Calc
# Call the above function on each dataframe to generate their corresponding Calc tables.
IGHV_Calc = generate_calc_table(IGHVdf, Total_IGHV_Reads, 'IGHV')
IGHC_Calc = generate_calc_table(IGHCdf, Total_IGHC_Reads, 'IGHC')
IGKV_Calc = generate_calc_table(IGKVdf, Total_Light_Variable, 'IGKV')
IGKC_Calc = generate_calc_table(IGKCdf, Total_Light_Constant, 'IGKC')
IGLV_Calc = generate_calc_table(IGLVdf, Total_Light_Variable, 'IGLV')
IGLC_Calc = generate_calc_table(IGLCdf, Total_Light_Constant, 'IGLC')
# Concatenate above tables into two larger tables, the Heavy Chain table (IgH), and Light Chain Table (IgL)
# Ignore previous index and rename columns (three are labeled 'Count' at this point due to their origin as
# columns computed from the initial 'Count' column in generate_calc_table)
Graph_IgL = pd.concat([IGKC_Calc, IGKV_Calc, IGLC_Calc, IGLV_Calc]).reset_index().drop(columns='index')
Graph_IgL.columns = ['CommonName', 'Count', 'Percentage', 'TotalFrequency', 'Locus', 'ElementSize']
Graph_IgH = pd.concat([IGHC_Calc, IGHV_Calc]).reset_index().drop(columns='index')
Graph_IgH.columns = ['CommonName', 'Count', 'Percentage', 'TotalFrequency', 'Locus', 'ElementSize']
# Write these tables to a tab-delimited text file. R will use these files to plot
Graph_IgH.to_csv(r'%s/%sGraph_IgH.txt' % (filepath, samplename), sep='\t', float_format='%.12f',
index=False)
Graph_IgL.to_csv(r'%s/%sGraph_IgL.txt' % (filepath, samplename), sep='\t', float_format='%.12f',
index=False)
# This function returns a list of primary information from the input dataframe, e.g. when given IGHC_Calc, etc.
# it will extract the two most-read genes, their frequencies, and the difference between their frequencies.
def get_Primary(dataframe):
Primary = dataframe.sort_values(by='Count', ascending=False).reset_index().at[0, 'Geneid']
PrimaryFreq = dataframe.sort_values(by='Count', ascending=False).reset_index().at[0, 'List_Percent']
Secondary = dataframe.sort_values(by='Count', ascending=False).reset_index().at[1, 'Geneid']
SecondaryFreq = dataframe.sort_values(by='Count', ascending=False).reset_index().at[1, 'List_Percent']
Delta = PrimaryFreq - SecondaryFreq
return pd.Series([Primary, PrimaryFreq, Secondary, SecondaryFreq, Delta])
# api: pandas.Series
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connecitivty strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'
connectome_path = os.path.join(deriv_path, 'mrtrix')
xcpengine_path = os.path.join(deriv_path, 'xcpengine')
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'
num_BOLD_timepoints = 180
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]
network_rowindex_ls = []
for network_i in range(1,num_network+1):
df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
network_rowindex_ls.append(df_network.index.values)
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
print('Number of connectome and xcpengine results do not match')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
individual_connectome = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
connectome_array[:,:,sub_idx] = individual_connectome
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
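# each missing parcel is filled with the mean BOLD value of the parcels assigned to the same network (via network_rowindex_ls)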
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
BOLD_series = BOLD_series.T
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if BOLD_series[missing_parcel_id-1,:].sum() != 0:
print("missing parcel does not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
else: # multiple parcels missing
for missing_idx in missing_parcel_id:
network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'
for sub_idx in range(len(path_df)):
fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
fmri_map = fmri_map.loc[:,paramap_str]
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if not np.isnan(fmri_map[missing_parcel_id]):
print("missing parcel does not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
fmri_map = fmri_map.to_numpy()
else: # multiple parcels missing
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
network_parcel_idx = network_rowindex_ls[network_key-1]
for parcel_i in range(missing_parcel_id.size):
fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
fmri_map = fmri_map.to_numpy()
fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fmri parametric map and filling missing part')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
func_sig = BOLD_series_3D
s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))
else:
raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
# Symmetric Normalization of adjacency matrix
D = np.diag(np.sum(W,1)) #degree
D_power = np.power(D, (-1/2))
D_power[np.isinf(D_power)] = 0
Wsymm = D_power @ W @ D_power
#The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
if normalize_type == 'W':
G = graphs.Graph(Wsymm)
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'L':
G = graphs.Graph(W, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'both':
Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
G = graphs.Graph(Wsymm, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
# L = np.eye(len(Wsymm)) - Wsymm
# lamda, U = np.linalg.eig(L)
# U = U[:, np.argsort(lamda)]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
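# graph Fourier transform: project the BOLD time series onto the structural harmonics, s_head = U.T @ s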
s_head = U.T @ func_sig[:,:,sub_idx]
s_head_cohort[:,:,sub_idx] = s_head
# calculate surrogates for each individual
s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5(optional). plot Sihag2020 plot
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
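# reconstruct the signal from the 4 lowest-frequency harmonics (s_low) and 4 high-frequency harmonics near the top of the spectrum (s_high)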
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head_NC = s_head_cohort[:,:,NC_index]
s_head_NC_square = np.power(s_head_NC, 2)
#s_head_NC_square = np.power(s_head_NC_square, 1/2)
s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)
i=0
AUC=0
while AUC < s_head_NC_AUCTOT/2:
AUC = np.trapz(s_head_NC_square_mean[:i])
i = i + 1
cutoff = i-1
print('finished calculating the median-split threshold')
print('cutoff = {}'.format(cutoff))
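# minimal toy illustration of the median-split rule above (synthetic spectrum, not project data):
# toy_spectrum = np.exp(-np.arange(10) / 3.0)  # decaying spectral power over 10 graph frequencies
# toy_total = np.trapz(toy_spectrum)
# toy_i, toy_AUC = 0, 0.0
# while toy_AUC < toy_total / 2:
#     toy_AUC = np.trapz(toy_spectrum[:toy_i])
#     toy_i += 1
# toy_cutoff = toy_i - 1  # index splitting the spectrum into two halves of equal energy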
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual
s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
s_deCoupIdx_node = s_liberal/s_aligned # only for NC
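# the decoupling index is the ratio of liberal (high-graph-frequency) to aligned (low-graph-frequency) signal norms; values > 1 indicate activity weakly coupled to the structural graph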
print('finished calculating decoupling index for empirical data')
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
for i, sub_idx in enumerate(nc_idx):
for rand_idx in range(num_rand):
s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
# norm for BOLD timepoints
s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
# average for cohorts
s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
# decoupling index
s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand
print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for empirical and surrogate data
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for empirical and surrogate data
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
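# empirical values falling outside the surrogate 5th-95th percentile band are flagged as significant (two-sided test)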
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
# 15. save results to csv
if normalize_type == 'W':
normalize_str = '_W'
elif normalize_type == 'L':
normalize_str = '_L'
elif normalize_type == 'both':
normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node_significance, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network_significance, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_significance_df.csv'))
s_deCoupIdx_network_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_significance_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_significance.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain_significance))
# save empirical harmonics for NC cohort (for plot usage, ndarray with 1 × num_region)
s_deCoupIdx_node_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_empirical_df.csv'))
s_deCoupIdx_network_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' +'-network_empirical_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_empirical.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain))
# save subject-level harmonics (ndarray with num_sub × num_region)
s_deCoupIdx_node_individual_df = pd.DataFrame(data = s_deCoupIdx_individual.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_individual_df = pd.DataFrame(data = s_deCoupIdx_network_individual.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_individual_df = pd.DataFrame(data = s_deCoupIdx_brain_individual)
s_deCoupIdx_node_individual_df = pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_node_individual_df],axis=1)
s_deCoupIdx_network_individual_df =
|
pd.concat([path_df.loc[:,'subname'],s_deCoupIdx_network_individual_df],axis=1)
|
pandas.concat
|
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 = pd.read_csv('machine/Calling/Sensors_2.csv')
c3 = pd.read_csv('machine/Calling/Sensors_3.csv')
c4 = pd.read_csv('machine/Calling/Sensors_4.csv')
c5 = pd.read_csv('machine/Calling/Sensors_5.csv')
c6 = pd.read_csv('machine/Calling/Sensors_6.csv')
c7 = pd.read_csv('machine/Calling/Sensors_7.csv')
c8 = pd.read_csv('machine/Calling/Sensors_8.csv')
c9 = pd.read_csv('machine/Calling/Sensors_9.csv')
c10 = pd.read_csv('machine/Calling/Sensors_10.csv')
calling = pd.concat([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10], axis = 0)
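# a more compact equivalent, assuming the same ten-file layout:
# calling = pd.concat([pd.read_csv('machine/Calling/Sensors_%d.csv' % i) for i in range(1, 11)], axis=0)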
t1 = pd.read_csv('machine/Texting/Sensors_1.csv')
t2 = pd.read_csv('machine/Texting/Sensors_2.csv')
t3 = pd.read_csv('machine/Texting/Sensors_3.csv')
t4 = pd.read_csv('machine/Texting/Sensors_4.csv')
t5 = pd.read_csv('machine/Texting/Sensors_5.csv')
t6 = pd.read_csv('machine/Texting/Sensors_6.csv')
t7 = pd.read_csv('machine/Texting/Sensors_7.csv')
t8 = pd.read_csv('machine/Texting/Sensors_8.csv')
t9 = pd.read_csv('machine/Texting/Sensors_9.csv')
t10 =
|
pd.read_csv('machine/Texting/Sensors_10.csv')
|
pandas.read_csv
|
"""
Open Power System Data
Household Datapackage
read.py : read time series files
"""
from datetime import datetime, time
import logging
import os
from struct import unpack
import pytz
import pandas as pd
from .tools import update_progress
logger = logging.getLogger('log')
logger.setLevel('INFO')
def read(household_name, dir_name, household_region, household_type, feeds, headers,
start_from_user=None, end_from_user=None):
"""
For the households specified in the households.yml file, read the time series data
Parameters
----------
household_name : str
Name of the Household to be placed in the column-MultiIndex
dir_name : str
directory path to the location of the Households MySQL data
household_region : str
Region of the Household to be placed in the column-MultiIndex
household_type : str
Type of the Household to be placed in the column-MultiIndex
feeds : dict of key value pairs
Indicator for subset of feed ids, available for the Household
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
start_from_user : datetime.date, default None
Start of period for which to read the data
end_from_user : datetime.date, default None
End of period for which to read the data
Returns
----------
data_set: pandas.DataFrame
A DataFrame containing the combined data for the household
"""
data_set =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ge=pd.read_csv('./testgoodwordse.csv')
gn=
|
pd.read_csv('./testgoodwordsn.csv')
|
pandas.read_csv
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 31 20:09:16 2019
@author: hanbosun
"""
# %%
# %% simple blender
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import seaborn as sns
sns.set()
from datetime import datetime
import os
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
os.chdir('/Users/hanbosun/Documents/GitHub/TrasactionPrediction/')
s42=pd.read_csv('submission/lgb_submission_42.csv')['target']
s72=pd.read_csv('submission/lgb_submission_72.csv')['target']
s0=pd.read_csv('submission/lgb_submission_seed0.csv')['target']
s1=pd.read_csv('submission/lgb_submission_seed1.csv')['target']
s2=pd.read_csv('submission/lgb_submission_seed2.csv')['target']
s3=pd.read_csv('submission/lgb_submission_seed3.csv')['target']
s4=pd.read_csv('submission/lgb_submission_seed4.csv')['target']
s5=pd.read_csv('submission/lgb_submission_seed5.csv')['target']
s6=pd.read_csv('submission/lgb_submission_seed6.csv')['target']
s7=pd.read_csv('submission/lgb_submission_seed7.csv')['target']
s12=pd.read_csv('submission/lgb_submission_seed12.csv')['target']
s13=pd.read_csv('submission/lgb_submission_seed13.csv')['target']
s14=pd.read_csv('submission/lgb_submission_seed14.csv')['target']
s15=pd.read_csv('submission/lgb_submission_seed15.csv')['target']
s16=pd.read_csv('submission/lgb_submission_seed16.csv')['target']
s17=pd.read_csv('submission/lgb_submission_seed17.csv')['target']
s18=pd.read_csv('submission/lgb_submission_seed18.csv')['target']
s19=pd.read_csv('submission/lgb_submission_seed19.csv')['target']
s = pd.DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3, 's4': s4, 's5': s5, 's6': s6, 's7': s7,
's12': s12, 's13': s13, 's14': s14, 's15': s15, 's16': s16, 's17': s17, 's18': s18, 's19': s19})
# since we use AUC and the distribution of the probabilities is not Normal, Kendall correlation is more appropriate
kendall = s.corr(method = 'kendall')  # alternatives: 'spearman', 'pearson'
submission = pd.read_csv('input/sample_submission.csv')
submission['target'] = (s0 + s1 + s2 + s3 + s4 + s5 + s6 + s7 + s12 + s13 + s14 + s15 + s16 + s17 + s18 + s19) /16
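# equivalently: submission['target'] = s.mean(axis=1), since s holds exactly these 16 columns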
filename="submission/blended_submission_{:%Y-%m-%d_%H_%M}.csv".format(datetime.now())
submission.to_csv(filename, index=False)
# %% rank blender: https://www.kaggle.com/roydatascience/blender-of-0-901-solutions
import numpy as np
import pandas as pd
import os
from scipy.stats import rankdata
LABELS = ["target"]
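# rank blending (as in the kernel linked above): convert each submission's scores to ranks with rankdata, then average the ranks, so models with different score scales contribute equally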
predict_list = []
predict_list.append(pd.read_csv('submission/lgb_submission_42.csv')[LABELS].values)
predict_list.append(pd.read_csv('submission/lgb_submission_72.csv')[LABELS].values)
predict_list.append(pd.read_csv('submission/lgb_submission_seed0.csv')[LABELS].values)
predict_list.append(pd.read_csv('submission/lgb_submission_seed1.csv')[LABELS].values)
predict_list.append(
|
pd.read_csv('submission/lgb_submission_seed2.csv')
|
pandas.read_csv
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
|
tm.assert_equal(result, expected)
|
pandas.util.testing.assert_equal
|
import sys
import pandas as pd
import sqlite3
from sqlalchemy import create_engine
import nltk
from sklearn.externals import joblib
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
class MessageLengthExtractor(BaseEstimator, TransformerMixin):
def message_length(self, text):
'''
Returns the number of characters in text
'''
return len(text)
def fit(self, x, y=None):
'''
Overriding function from baseclass, fits the object
'''
return self
def transform(self, X):
'''
Overriding function from baseclass, transforms the object
'''
X_msg_len = pd.Series(X).apply(self.message_length)
return (pd.DataFrame(X_msg_len))
class StartingNounExtractor(BaseEstimator, TransformerMixin):
def starting_noun(self, text):
'''
Is there a sentence that starts with a Noun
'''
sentences= nltk.sent_tokenize(text)
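# note: tokenize() below is assumed to be a custom tokenizer defined elsewhere in the full script (not shown in this excerpt)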
for sentence in sentences:
parts_of_speech_tags = nltk.pos_tag(tokenize(sentence))
word_1, tag_1 = parts_of_speech_tags[0]
if(tag_1[:2]=='NN'):
return True
return False
def fit(self, X, y=None):
'''
Overriding function from baseclass, fits the object
'''
return self
def transform(self, X):
'''
Overriding function from baseclass, transforms the object
'''
X_tagged =
|
pd.Series(X)
|
pandas.Series
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
import pydotplus
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
def MySupportVectorMachine():
#Create the classifier
classifier = SVC(C=10, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=1, shrinking=True, tol=0.001,
verbose=False)
#Train the classifier
classifier.fit(X_train, y_train)
# Now predict on the test set
predicted = classifier.predict(X_test)
#Rename the labels for the confusion matrix
labels = ("Female","Male")
positions = (0,1)
#Print the results
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(y_test, predicted)))
disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap="OrRd")
disp.figure_.suptitle("Confusion Matrix")
print("Confusion matrix:\n%s" % disp.confusion_matrix)
#Print the time taken to run the classifier
print("\nElapsed time: {:.2f}m\n".format((time.time()-start_time)/60))
#Display the confusion matrix
plt.xticks(positions,labels)
plt.yticks(positions,labels)
plt.savefig('OutputSVM/ConfusionMatrix.png', bbox_inches='tight')
plt.show()
#Print the accuracy
from sklearn.metrics import accuracy_score
print("Accuracy: ")
print(accuracy_score(y_test, predicted))
def MyDecisionTree():
#The decision tree classifier
clf = tree.DecisionTreeClassifier(criterion = "gini", max_depth= 13)
#Train the decision tree
clf_train = clf.fit(X_train, y_train)
#Predict the response for the dataset
y_pred = clf.predict(X_test)
#Model accuracy: evaluate the model
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
#Export the decision tree in DOT format using GraphViz
dot_data = tree.export_graphviz(clf_train, out_file=None, feature_names=X_train.columns.values,
class_names=['Female', 'Male'], rounded=True, filled=True) #Gini decides which attribute/feature should be placed at the root node, which features will act as internal nodes or leaf nodes
#Creo il decision tree in formato Graph partendo dal formato DOT
graph = pydotplus.graph_from_dot_data(dot_data)
#Salvo in png il decision tree creato
test2 = graph.write_png("OutputDT/GraphDecisionTree.png")
print ("\nTempo trascorso: {:.2f}m\n".format((time.time()-start_time)/60))
def MyNearestNeighbors():
    #K-nearest neighbours classifier
    classifier = KNeighborsClassifier(n_neighbors=9)
    #Train the classifier
    clf_train = classifier.fit(X_train, y_train)
    #Predict the response for the test set
    y_pred = classifier.predict(X_test)
    labels = ("Female","Male")
    positions = (0,1)
    #Print the results
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, y_pred)))
    disp = metrics.plot_confusion_matrix(classifier, X_test, y_test, cmap=plt.cm.Blues)
    disp.figure_.suptitle("Confusion Matrix")
    print("Confusion matrix:\n%s" % disp.confusion_matrix)
    # Model accuracy: evaluate the model
    print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
    print("\nElapsed time: {:.2f}m\n".format((time.time() - start_time) / 60))
    #Display the confusion matrix
    plt.xticks(positions,labels)
    plt.yticks(positions,labels)
    plt.savefig('OutputKNN/ConfusionMatrixBalancedWeb_n9.png', bbox_inches='tight')
    plt.show()
#Function that creates the balanced dataset
def CreateBalanced4c4s(dfconc):
    #Create a csv corresponding to dataset4c4s with the added "Gender" column
    dfconc.to_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header = False, index = False)
    #Read the csv just created in order to perform the manipulation steps
    DFbalanced = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv",header = None)
    #Keep in a dataframe all rows with gender equal to 1 (male)
    dfBalanceM = DFbalanced.loc[DFbalanced[64] == 1]
    #Keep in a dataframe all rows with gender equal to -1 (female)
    dfBalanceF = DFbalanced.loc[DFbalanced[64] == -1]
    #Drop the excess rows of the female dataframe (relative to the male dataframe)
    dfBalanceF = dfBalanceF.iloc[0:432]
    #Concatenate the two dataframes, which now have the same number of rows
    DFbalanced = pd.concat([dfBalanceM,dfBalanceF], axis = 0)
    #Write the corresponding csv
    DFbalanced.to_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header = False, index = False)
#Function that splits the balanced dataset into train and test sets
def ExecOnBalanced():
    #Read the balanced dataset
    dataframe = pd.read_csv("DatasetCelebA/Dataset4c4sBalanced1000.csv", header=None)
    #Rename column 64 to Gender.
    dataframe = dataframe.rename(columns={dataframe.columns[64]: "Gender"}) # -1 female and 1 male
    #Get the feature variables
    feature_cols = list(dataframe.columns.values)
    X = feature_cols[1:len(feature_cols) - 1]
    X = dataframe[X]
    #Get the target variable
    y = dataframe.Gender
    #Split the dataframe into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=1) # 70% training and 30% test
    return X_train,X_test,y_train,y_test
#Function that reads the CSV files used for the tests
def ReadCSV():
    # Load the two datasets
    dataframe = pd.read_csv("DatasetCelebA/dataset4c4s1000.csv", header=None)
feature =
|
pd.read_csv("DatasetCelebA/list_attr_celeba.csv")
|
pandas.read_csv
|
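# Minimal driver sketch (an assumption; the original excerpt does not show how these
# functions are invoked). The classifiers above read X_train, X_test, y_train, y_test
# and start_time as module-level globals, so the split is assigned at module level here.
if __name__ == "__main__":
    start_time = time.time()                             # used by the elapsed-time printouts
    X_train, X_test, y_train, y_test = ExecOnBalanced()  # 70/30 split of the balanced CSV
    MySupportVectorMachine()
    MyDecisionTree()
    MyNearestNeighbors()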
# -*- coding: utf-8 -*-
"""rewards.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wSaVp_tg-Thk_ih7KFoSLloaejQLe_v7
"""
import pandas as pd
import plotly.express as px
import numpy as np
data1 = np.asarray([
[-74.33685869, -72.5324857, -88.47022494, -61.16093581, -660.2188411, -34.48468219, -52.5229316, -59.32260936],
[-90.88501301, -46.11791824, -83.34722873, -94.2334051, -815.6631797, -61.06891306, -57.7632315, -47.8487639],
[-73.91411699, -77.59040873, -77.01557029, -92.72689018, -600.7604739, -59.89977608, -57.68274631, -64.39186185],
[-192.5411993, -24.95264984, -110.5459243, -74.78287134, -241.33862, -68.36011578, -57.71062246, -70.30127793],
[-70.56101865, -53.43896026, -75.05082911, -59.63233941, -417.1066496, -62.21954958, -51.147781, -77.42080861],
[-5.999697612, -59.11368912, -69.38302732, -61.16266931, -223.087533, -64.95227382, -55.6735378, -76.22536499],
[153.107842, -14.58965759, -39.34082378, -59.62059657, -758.0064858, -70.86303994, -53.00074024, -52.84115631],
[159.2601384, -24.20373697, -24.75340317, -71.05858162, -543.6898402, -70.5126974, -44.0885844, -47.21479324],
[167.520669, -51.60538555, -79.71618173, -58.89638826, -690.0980123, -54.12362492, -54.36445826, -55.11445826]
])
data2 = np.asarray([
[-213.1039408, -86.60203251, -113.0851182, -108.8752598, -1049.245347, -100.0107539, -107.0070661, -99.5741411],
[-187.8262188, -116.0819553, -98.91194532, -106.2171531, -1044.407354, -134.9483129, -114.1879624, -115.5642645],
[-91.612578, -130.218187, -120.0383136, -119.0852531, -1238.74529, -109.9179512, -110.5007863, -120.1931743],
[-86.60450772, -116.5197368, -105.3994866, -108.8804071, -1167.584126, -125.4021974, -120.1197327, -122.5280382],
[-86.16190285, -123.8487977, -87.23595413, -125.1815342, -1161.814595, -120.1925051, -99.36077911, -114.6224723],
[-82.59160215,-124.054699, -88.14533935, -124.4893578, -1143.869427, -109.8486868, -124.6037669, -99.14100639],
[103.4123898, -95.06164134, -110.7838212, -111.850257, -1048.138292, -122.6433241, -124.3869051, -110.4287416],
[190.4916607, -112.0394803, -103.6646041, -115.3407975, -1130.158999, -98.2028528, -99.06896169, -109.5575971],
[210.56, -86.60203251, -147.9139065, -147.1861822, -1252.134843, -73.82300449, -175.8197084, -125.9059582]
])
data3 = np.asarray([
[-143.7203998, -79.56725911, -100.7776716, -85.01809782, -854.732094, -67.24771804, -79.76499886, -79.44837523],
[-139.3556159, -81.09993674, -91.12958703, -100.2252791, -930.0352668, -98.008613, -85.97559695, -81.7065142],
[-82.76334749, -103.9042979, -98.52694197, -105.9060716, -919.752882, -84.90886363, -84.09176629, -92.2925181],
[-139.5728535, -70.73619331, -107.9727054, -91.83163922, -704.4613729, -96.8811566, -88.91517758, -96.41465804],
[-78.36146075, -88.64387899, -81.14339162, -92.40693682, -789.4606221, -91.20602736, -75.25428006, -96.02164045],
[-44.29564988, -91.58419406, -78.76418333, -92.82601353, -683.4784799, -87.40048032, -90.13865235, -87.68318569],
[128.2601159, -54.82564946, -75.06232252, -85.73542681, -903.0723888, -96.75318203, -88.69382266, -81.63494894],
[174.8758995, -68.12160865, -64.20900363, -93.19968954, -836.9244197, -84.3577751, -71.57877304, -78.38619518],
[189.0403345, -69.10370903, -113.8150441, -103.0412853, -971.1164276, -63.97331471, -115.0920833, -90.5102082]
])
for i in range(1, 4):
path = globals()[f"data{i}"]
globals()[f"df{i}"] =
|
pd.DataFrame(path)
|
pandas.DataFrame
|
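# Sketch only (an assumption about intent): plotly.express is imported above but unused
# in this excerpt. One plausible use is drawing each reward matrix as line traces,
# with one line per column of the wide-form DataFrame.
def plot_rewards_sketch(df, title="rewards"):
    fig = px.line(df, title=title)  # one trace per column
    fig.show()
# plot_rewards_sketch(df1, "data1")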
import numpy as np
import pandas as pa
import seaborn as sn
import matplotlib.pyplot as plt
import warnings
#import gensim
import os
from gensim import models,corpora,similarities
from gensim.models import LdaModel
from nltk import FreqDist
from scipy.stats import entropy
from nltk.tokenize import TweetTokenizer,word_tokenize
warnings.filterwarnings('ignore')
sn.set_style("darkgrid")
### Read all the cleaned data
filedir = os.path.abspath(r"C:\Users\BABI\Dynamic Blog Recommendation\Cleaned Data")
medium_filename = "cleaned_medium"
ana_filename = "cleaned_analytics"
toward_filename = "cleaned_towards_data_science"
toward_filepath = os.path.join(filedir,toward_filename)
medium_filepath = os.path.join(filedir,medium_filename)
ana_filepath = os.path.join(filedir,ana_filename)
data_medium = pa.read_csv(medium_filepath)
data_medium['Webpage'] = 'Medium'
data_toward = pa.read_csv(toward_filepath)
data_toward['Webpage'] = 'Towards_Data_Science'
data_toward = data_toward.rename(columns={'Link':'Links'})
data_ana = pa.read_csv(ana_filepath)
data_ana = data_ana.rename(columns={'Titles':'Title'})
data_ana['Webpage'] = 'Analytics_Vidhya'
data = pa.concat([data_medium,data_toward])
data =
|
pa.concat([data,data_ana])
|
pandas.concat
|
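# Minimal topic-modelling sketch (not shown in the original excerpt) illustrating how
# the gensim imports above are typically used: build a Dictionary and bag-of-words
# corpus from tokenised text, then fit an LdaModel. The 'Title' column is assumed to
# exist in the merged frame, and the tokenisation here is illustrative only.
def fit_lda_sketch(text_series, num_topics=5):
    texts = [word_tokenize(str(t).lower()) for t in text_series.dropna()]
    dictionary = corpora.Dictionary(texts)                     # token -> id mapping
    corpus = [dictionary.doc2bow(tokens) for tokens in texts]  # bag-of-words vectors
    return LdaModel(corpus=corpus, id2word=dictionary, num_topics=num_topics), dictionary
# lda, dictionary = fit_lda_sketch(data['Title'])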
import numpy as np
import pandas as pd
import fiona
import io
from shapely import geometry
import click
from wit_tooling import query_wit_data
def shape_list(key, values, shapefile):
"""
Get a generator of shapes from the given shapefile
key: the key to match in 'properties' in the shape file
values: a list of property values
shapefile: the name of your shape file
e.g. key='ORIGID', values=[1, 2, 3, 4, 5],
shapefile='/g/data/r78/DEA_Wetlands/shapefiles/MDB_ANAE_Aug2017_modified_2019_SB_3577.shp'
"""
count = len(values)
with fiona.open(shapefile) as allshapes:
for shape in allshapes:
shape_id = shape['properties'].get(key)
if shape_id is None:
continue
if isinstance(shape_id, float):
shape_id = int(shape_id)
if shape_id in values:
yield(shape_id, shape)
count -= 1
if count <= 0:
break
def get_areas(features, pkey='SYSID'):
"""
Calculate the area of a list/generator of shapes
input:
features: a list of shapes indexed by the key
output:
a dataframe of area index by the key
"""
re = pd.DataFrame()
for f in features:
va = pd.DataFrame([[f[0], geometry.shape(f[1]['geometry']).area/1e4]], columns=[pkey, 'area'])
re = re.append(va, sort=False)
return re.set_index(pkey)
def dump_wit_data(key, feature_list, output, batch=-1):
"""
dump wit data from the database into a file
input:
key: Name to id the polygon
feature_list: a list or generator of features
output:
a csv file to save all the wit data
"""
count = 0
if batch > 0:
fname = output.split('.')[0]
sub_fname = fname + '_0.csv'
appendix = 0
else:
sub_fname = output
for f_id, f in feature_list:
_, wit_data = query_wit_data(f)
csv_buf = io.StringIO()
wit_df = pd.DataFrame(data=wit_data, columns=['TIME', 'BS', 'NPV', 'PV', 'WET', 'WATER'])
wit_df.insert(0, key, f_id)
wit_df.to_csv(csv_buf, index=False, header=False)
csv_buf.seek(0)
with open(sub_fname, 'a') as f:
f.write(csv_buf.read())
if batch < 0:
continue
count += 1
if count >= batch:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
count = 0
appendix += 1
sub_fname = fname + '_' + str(appendix) + '.csv'
if count < batch or batch < 0:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
def annual_metrics(wit_data, members=['PV', 'WET', 'WATER', 'BS', 'NPV', ['NPV', 'PV', 'WET'],
['PV', 'WET'], ['WATER', 'WET']], threshold=[25, 75], pkey='SYSID'):
"""
Compute the annual max, min, mean, count with given wit data, members and threshold
input:
wit_data: dataframe of WIT
members: the elements which the metrics are computed against, can be a column from wit_data, e.g. 'PV'
or the sum of wit columns, e.g. ['WATER', 'WET']
threshold: a list of thresholds such that (elements >= threshold[i]) is True,
where i = 0, 1...len(threshold)-1
output:
dataframe of metrics
"""
years = wit_data['TIME']
i = 0
wit_df = wit_data.copy(deep=True)
for m in members:
if isinstance(m, list):
wit_df.insert(wit_df.columns.size+i, '+'.join(m), wit_df[m].sum(axis=1))
years = pd.DatetimeIndex(wit_df['TIME']).year.unique()
shape_id_list = wit_df[pkey].unique()
    #shane changed 4 to 5 to accommodate the median added below
wit_metrics = [pd.DataFrame()] * 5
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).max()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_max' for n in wit_yearly.columns[1:]})
wit_metrics[0] = wit_metrics[0].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).min()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_min' for n in wit_yearly.columns[1:]})
wit_metrics[1] = wit_metrics[1].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).mean()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_mean' for n in wit_yearly.columns[1:]})
wit_metrics[2] = wit_metrics[2].append(wit_yearly, sort=False)
#*********************** START ADDED BY SHANE ***********************
#adding median
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).median()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_median' for n in wit_yearly.columns[1:]})
wit_metrics[3] = wit_metrics[3].append(wit_yearly, sort=False)
#*********************** END ADDED BY SHANE ***********************
for y in years:
wit_yearly = wit_df[
|
pd.DatetimeIndex(wit_df['TIME'])
|
pandas.DatetimeIndex
|
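# Self-contained sketch (not from the original module) of the yearly-aggregation pattern
# used in annual_metrics above: select each calendar year with pd.DatetimeIndex(...).year,
# group by the polygon id, then aggregate and tag the result with the year.
def yearly_max_sketch():
    toy = pd.DataFrame({
        'SYSID': [1, 1, 2, 2],
        'TIME': ['2019-03-01', '2020-07-15', '2019-05-20', '2020-11-02'],
        'PV':   [0.2, 0.6, 0.1, 0.4],
    })
    out = pd.DataFrame()
    for y in pd.DatetimeIndex(toy['TIME']).year.unique():
        yearly = toy[pd.DatetimeIndex(toy['TIME']).year == y].drop(columns=['TIME']).groupby('SYSID').max()
        yearly.insert(0, 'YEAR', y)
        out = pd.concat([out, yearly])
    return out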
"""
.. module:: run_model
:synopsis: Collection of Models
.. moduleauthor:: <NAME>
This modules consists of collection of various machine learning models. We start with Light GBM.
Depending on the time, we can add more
Todo:
* Add more machine learning models, such as GBM, RF and XGBoost
* Spark Compatible GBM and Light GBM Models
* Add Model Diagnostic plots using SHAP Library
* Feature Reduction
* Config file
"""
import sys
sys.path.append('.')
import os
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
from retail_sales_prediction import logger
def run_model_lgbm(feature_prep, X_train, y_train, X_val, y_val, X_test, config, num_days=6):
"""
Training the Light GBM Model.
    Args:
        feature_prep:
        X_train:
        y_train:
        X_val:
        y_val:
        X_test:
        config: dict of run settings ('model_params', 'MAX_ROUNDS', 'output_dir')
        num_days:
    Returns:
    """
logger("Training and predicting models...")
# params = {
# 'num_leaves': 3,
# 'objective': 'regression',
# 'min_data_in_leaf': 200,
# 'learning_rate': 0.02,
# 'feature_fraction': 0.8,
# 'bagging_fraction': 0.7,
# 'bagging_freq': 1,
# 'metric': 'l2',
# 'num_threads': 20
# }
params = config['model_params']
# MAX_ROUNDS = 200
MAX_ROUNDS = config['MAX_ROUNDS']
output_dir = config['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    logger.info('output directory : {}'.format(output_dir))
val_pred = []
test_pred = []
cate_vars = []
for i in range(16):
logger.info("=" * 50)
logger.info("Step %d" % (i+1))
logger.info("=" * 50)
dtrain = lgb.Dataset(
X_train, label=y_train[:, i],
categorical_feature=cate_vars,
weight=
|
pd.concat([feature_prep.items["perishable"]] * num_days)
|
pandas.concat
|
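# Hedged sketch of how a single step of the loop above might be completed (the function
# body is truncated in this excerpt): build the validation Dataset, call lgb.train, and
# return the booster plus its validation predictions. The option values are assumptions,
# not the author's code; it reuses the `lightgbm as lgb` import from the snippet above.
def train_one_step_sketch(params, X_tr, y_tr_col, X_val, y_val_col, weights, max_rounds, cate_vars=()):
    dtrain = lgb.Dataset(X_tr, label=y_tr_col, categorical_feature=list(cate_vars), weight=weights)
    dval = lgb.Dataset(X_val, label=y_val_col, categorical_feature=list(cate_vars), reference=dtrain)
    booster = lgb.train(params, dtrain, num_boost_round=max_rounds,
                        valid_sets=[dtrain, dval], valid_names=['train', 'val'])
    return booster, booster.predict(X_val)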
#Gillespie's Direct Stochastic Simulation Algorithm Program
#Parser to convert .PSC files to arrays/matrices (speciesArray, parameterArray, and reactionMatrix) suitable for the main simulation code
#Note: the example .psc file was generated from a SBML .xml file using StochPy
#Final Project for BIOEN 6760, Modeling and Analysis of Biological Networks
#<NAME>
#Copyright 2013-2015
import re
import numpy as np
import itertools
import pandas as pd
filer = "BIOMD0000000504.xml.psc"
data = open(filer)
def convertReactants(reactants):
newReactantList = list()
for reactant in reactants:
if '>' not in reactant and ('+' not in reactant and '-' not in reactant):
if '{' in reactant:
m=re.match("{[0-9]*.[0-9]*}",reactant)
num = float(m.group()[1:-1])
reactant = reactant[m.span()[1]:]
newReactantList.append((num,reactant))
else:
newReactantList.append((1,reactant))
else:
newReactantList.append(reactant)
return newReactantList
def convertMe(line, reactants):
reactants = convertReactants(reactants)
flatReactants = list(itertools.chain(*reactants))
line = line.strip()
    numMultiply = len(re.findall(r"\*", line))
    equalsLocation = 0
    multiplyIter = re.finditer(r"\*", line)
if(numMultiply==1):
for a in multiplyIter:
multiplyLocation = a.start()
cVal = line[equalsLocation:multiplyLocation]
specieName = line[multiplyLocation+1:len(line)]
if 'Sink' in flatReactants:
return (1,cVal,-reactants[0][0],reactants[0][1],0,0,0,reactants[2][1],0,0)
elif 'Source' in flatReactants:
return (1,cVal,-0,reactants[0][1], 0,0,reactants[2][0],reactants[2][1],0,0)
elif '$pool' in flatReactants:
return (1,cVal,-0,reactants[0][1], 0,0,reactants[2][0],reactants[2][1],0,0)
elif len(reactants)==5:
return (1,cVal,-reactants[0][0],reactants[0][1],0,0, reactants[2][0],reactants[2][1],reactants[4][0],reactants[4][1])
else:
return (1,cVal,-reactants[0][0],reactants[0][1],0,0, reactants[2][0],reactants[2][1],0,0)
elif(numMultiply==2):
for i,a in enumerate(multiplyIter):
if i==0:
multiplyLocation1 = a.start()
cVal = line[equalsLocation:multiplyLocation1]
elif i==1:
multiplyLocation2 = a.start()
specie1 = line[multiplyLocation1:multiplyLocation2][1:]
specie2 = line[multiplyLocation2:len(line)][1:]
if len(reactants)==3:
if specie1 not in flatReactants:
return (2,cVal,-reactants[0][0],reactants[0][1],0,specie1,reactants[2][0],reactants[2][1],0,0)
elif specie2 not in flatReactants:
return (2,cVal,-reactants[0][0],reactants[0][1],0,specie2,reactants[2][0],reactants[2][1],0,0)
reactionArrowIndex = reactants.index('>')
if reactionArrowIndex==3:
return (2,cVal,-reactants[0][0],reactants[0][1],-reactants[2][0],reactants[2][1],reactants[4][0],reactants[4][1],0,0)
elif reactionArrowIndex==1:
if specie1 not in flatReactants:
return (2,cVal,-reactants[0][0],reactants[0][1],0,specie1,reactants[2][0],reactants[2][1],reactants[4][0],reactants[4][1])
elif specie2 not in flatReactants:
return (2,cVal,-reactants[0][0],reactants[0][1],0,specie2,reactants[2][0],reactants[2][1],reactants[4][0],reactants[4][1])
elif(numMultiply==3):
for i,a in enumerate(multiplyIter):
if i==0:
multiplyLocation1 = a.start()
cVal = line[equalsLocation:multiplyLocation1]
elif i==1:
multiplyLocation2 = a.start()
minusOneLocation = line.find("-1.0")
specie1 = line[multiplyLocation1:multiplyLocation2][1:]
convertReactants(reactants)
return (3,cVal,-reactants[0][0],reactants[0][1],0,0,reactants[2][0],reactants[2][1],0,0)
reactionList = list()
specieDict = dict()
parameterDict = dict()
reactionFlag = bool()
fixedSpeciesFlag = bool()
variableSpeciesFlag = bool()
parameterFlag = bool()
targetLine1 = int()
targetLine2 = int()
subLine = str()
for i,line in enumerate(data):
if "# Reactions" in line:
reactionFlag = True
if "# Fixed species" in line:
fixedSpeciesFlag = True
if "# Variable species" in line:
variableSpeciesFlag = True
if "# Parameters" in line:
parameterFlag = True
if reactionFlag==True and fixedSpeciesFlag==False:
if ":" in line and "#" not in line:
targetLine1 = i+1
targetLine2 = i+2
elif i==targetLine1:
subLine = line.strip()
elif i==targetLine2:
reactionList.append(convertMe(line,subLine.split(' ')))
elif variableSpeciesFlag==True and parameterFlag==False:
if "=" in line:
subLine = line.strip()
equalsLocation = subLine.find("=")
specie = subLine[0:equalsLocation-1].split('@')[0]
specieQuantity = subLine[equalsLocation+2:len(subLine)]
specieDict[specie] = float(specieQuantity)
elif parameterFlag==True:
if "=" in line:
subLine = line.strip()
equalsLocation = subLine.find("=")
parameter = subLine[0:equalsLocation-1]
parameterValue = subLine[equalsLocation+2:len(subLine)]
parameterDict[parameter] = float(parameterValue)
specieDict['Source'] = 1;
specieDict['Sink'] = 1;
specieDict['$pool'] = 1;
def findIndex(inputReactant,inputReactantDict):
    if(inputReactant == 0):
        return 0
    else:
        # list() is required on Python 3, where dict views cannot be indexed
        return list(inputReactantDict.keys()).index(inputReactant)
reactionMatrixList = list()
for reaction in reactionList:
reactionMatrixList.append((reaction[0],findIndex(reaction[1],parameterDict),reaction[2],findIndex(reaction[3],specieDict),reaction[4],findIndex(reaction[5],specieDict),reaction[6],findIndex(reaction[7],specieDict),reaction[8],findIndex(reaction[9],specieDict)))
reactionMatrix = np.array(reactionMatrixList,dtype='int32')
reactionMatrix = reactionMatrix[reactionMatrix[:,0].argsort()] #Sort the arrays by reaction type to minimize branch divergence / warp divergence when calculating propensities
#Make Parameter Array
parameterList = list()
parameterIndices = reactionMatrix[:,1]
for subParameterIndex in parameterIndices:
    # list() is required on Python 3, where dict views cannot be indexed
    parameterList.append(list(parameterDict.values())[int(subParameterIndex)])
parameterArray = np.array(parameterList,dtype='float32')
#Make Species Array
speciesArray = np.array(list(specieDict.values()),dtype='int32')
#Make Reaction Matrix
reactionMatrix = np.delete(reactionMatrix,1,axis=1)
reactionDF =
|
pd.DataFrame(reactionMatrix)
|
pandas.DataFrame
|
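# Small follow-on sketch (an assumption, not part of the parser): persist the parsed
# arrays so the main simulation code can load them without re-parsing the .psc file.
# The file names are hypothetical.
def save_parsed_model_sketch(species_array, parameter_array, reaction_df, prefix='parsed_'):
    np.save(prefix + 'speciesArray.npy', species_array)
    np.save(prefix + 'parameterArray.npy', parameter_array)
    reaction_df.to_csv(prefix + 'reactionMatrix.csv', index=False, header=False)
# save_parsed_model_sketch(speciesArray, parameterArray, reactionDF)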
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
|
tm.assert_equal(expected, vec + off)
|
pandas._testing.assert_equal
|
import pandas as pd
import numpy as np
import mapping as mp
from . import strategy
def get_relative_to_expiry_instrument_weights(dates, root_generics, expiries,
offsets, all_monthly=False,
holidays=None):
"""
Generate instrument weights for each root generic where the position is
rolled entirely in one day based on an offset from the earlier of the
contracts First Notice Date and Last Trade Date.
Parameters
----------
dates: Iterable
Iterable of pandas.Timestamps, dates to generate instrument weights
for.
root_generics: dict
Dictionary with key as root generic and value as list of future
generics, e.g. {"CL": ["CL1", "CL2"]}
expiries: pd.DataFrame
A pd.DataFrame with columns ["contract", "first_notice",
"last_trade"] where "first_notice" and "last_trade" must be
parseable to datetimes with format %Y-%m-%d and "contract" must be
a string in the form YYYYNNC representing the contract name, e.g.
"2007ESU".
offsets: int or dict
Number of business days to roll relative to earlier of the
instruments First Notice and Last Trade date. If int is given use
the same number for all futures, if dict is given keys must cover
all root generics and contain an integer for each.
all_monthly: boolean
Whether to roll each contract individually based on the offset from
the earlier of its First Notice and Last Trade date or to roll all
contracts with the same month code based on the earliest date.
holidays: list
list of timezone aware pd.Timestamps used for holidays when calculating
relative date roll logic.
Returns
-------
A dictionary of DataFrames of instrument weights indexed by root
generic, see mapper.mappings.roller()
Examples
--------
>>> import strategy.rebalance as rebal
>>> import pandas as pd
>>> dts = pd.date_range("2018-01-01", "2018-02-01", freq="B")
>>> rg = {"CL": ["CL1"], "ES": ["ES1"]}
>>> exp = pd.DataFrame(
... [["2018CLF", "2018-01-28", "2018-01-27"],
... ["2018CLG", "2018-02-28", "2018-02-27"],
... ["2018ESF", "2018-01-20", "2018-01-21"],
... ["2018ESG", "2018-02-20", "2018-02-21"]],
... columns=["contract", "first_notice", "last_trade"]
... )
>>> offsets = -5
>>> rebal.get_relative_to_expiry_instrument_weights(dts, rg, exp, offsets)
"""
close_by = _close_by_dates(expiries, all_monthly)
cntrct_close_by_dates = {}
for grp, dts in close_by.groupby("root_generic"):
cntrct_close_by_dates[grp] = dts.loc[:, "close_by"]
wts = {}
for root in root_generics:
gnrcs = root_generics[root]
cols =
|
pd.MultiIndex.from_product([gnrcs, ['front', 'back']])
|
pandas.MultiIndex.from_product
|
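The completion above builds the roller's column index with pandas.MultiIndex.from_product; a minimal sketch of what that call produces (the generic names and the 'front'/'back' levels mirror the snippet, everything else is illustrative):

import pandas as pd

gnrcs = ["CL1", "CL2"]  # generics for one root, as in the snippet
cols = pd.MultiIndex.from_product([gnrcs, ["front", "back"]])
print(list(cols))
# [('CL1', 'front'), ('CL1', 'back'), ('CL2', 'front'), ('CL2', 'back')]
wts = pd.DataFrame(0.0, index=pd.date_range("2018-01-01", periods=3, freq="B"), columns=cols)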
import tempfile
import pandas
from dnm_cohorts.download_file import download_file
from dnm_cohorts.person import Person
from dnm_cohorts.convert_pdf_table import extract_pages, convert_page
url = 'https://ars.els-cdn.com/content/image/1-s2.0-S0140673612614809-mmc1.pdf'
def extract_table(handle):
records = []
header = None
for page in extract_pages(handle, start=12, end=16):
data = convert_page(page, delta=2.7)
data = sorted(data, reverse=True, key=lambda x: x.y0)
data = data[1:]
header, data = data[0], data[1:]
for line in data:
text = [ x.get_text() for x in sorted(line, key=lambda x: x.x0) ]
if len(text) > 10:
records.append(text)
header = [ x.get_text() for x in sorted(header, key=lambda x: x.x0) ]
data =
|
pandas.DataFrame.from_records(records, columns=header)
|
pandas.DataFrame.from_records
|
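The completion above turns the extracted PDF rows into a DataFrame with pandas.DataFrame.from_records; a self-contained sketch with hypothetical header and records (the real ones come from the parsed PDF pages):

import pandas as pd

header = ["person_id", "chrom", "pos"]                  # hypothetical column names
records = [["p1", "1", "12345"], ["p2", "X", "67890"]]  # hypothetical rows
data = pd.DataFrame.from_records(records, columns=header)
print(data.shape)  # (2, 3)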
import pandas as pd
from fbprophet import Prophet
from fbprophet.plot import plot_plotly, plot_components_plotly
from sklearn.metrics import mean_squared_error
from math import sqrt
# Reading the parquet files
df_customer=pd.read_parquet('gs://stack-labs-list/processing/df_customer')
df_geolocation=pd.read_parquet('gs://stack-labs-list/processing/df_geolocation')
df_order_items=pd.read_parquet('gs://stack-labs-list/processing/df_order_items')
df_order_payments=pd.read_parquet('gs://stack-labs-list/processing/df_order_payments')
df_order_reviews=pd.read_parquet('gs://stack-labs-list/processing/df_order_reviews')
df_orders=pd.read_parquet('gs://stack-labs-list/processing/df_orders')
df_products=pd.read_parquet('gs://stack-labs-list/processing/df_products')
df_sellers=pd.read_parquet('gs://stack-labs-list/processing/df_sellers')
df_category_name=pd.read_parquet('gs://stack-labs-list/processing/df_category_name')
# Merging the dataframes
df = df_orders.merge(df_order_items, on='order_id', how='left')
df = df.merge(df_order_payments, on='order_id', how='outer', validate='m:m')
df = df.merge(df_order_reviews, on='order_id', how='outer')
df = df.merge(df_products, on='product_id', how='outer')
df = df.merge(df_customer, on='customer_id', how='outer')
df = df.merge(df_sellers, on='seller_id', how='outer')
df['order_purchase_timestamp'] = pd.to_datetime(df['order_purchase_timestamp']).dt.date
df1 = df.groupby('order_purchase_timestamp')['product_id'].count().reset_index()
split_point = len(df1)-190
df_model = df1[0:split_point]
validation = df1[split_point:]
df_model = df_model[['order_purchase_timestamp', 'product_id']].rename(columns = {'order_purchase_timestamp': 'ds', 'product_id': 'y'})
validation = validation[['order_purchase_timestamp', 'product_id']].rename(columns = {'order_purchase_timestamp': 'ds', 'product_id': 'y'})
df_validation = pd.DataFrame({'ds': validation['ds']})
model = Prophet()
model.fit(df_model)
saida = model.predict(df_validation)
rmse = sqrt(mean_squared_error(validation['y'], saida.yhat))
print('Test RMSE: %.3f' % rmse)
future = model.make_future_dataframe(periods=50, freq='M')
forecast = model.predict(future)
def transformar_estado(valor):
    # Map a Brazilian state abbreviation to its region; anything not listed falls back to 'Sul'.
    regioes = {
        'AC': 'Norte', 'AP': 'Norte', 'AM': 'Norte', 'PA': 'Norte',
        'RO': 'Norte', 'RR': 'Norte', 'TO': 'Norte',
        'AL': 'Nordeste', 'BA': 'Nordeste', 'CE': 'Nordeste', 'MA': 'Nordeste',
        'PB': 'Nordeste', 'PE': 'Nordeste', 'PI': 'Nordeste', 'RN': 'Nordeste', 'SE': 'Nordeste',
        'DF': 'Centro-Oeste', 'GO': 'Centro-Oeste', 'MT': 'Centro-Oeste', 'MS': 'Centro-Oeste',
        'ES': 'Sudeste', 'RJ': 'Sudeste', 'MG': 'Sudeste', 'SP': 'Sudeste',
    }
    return regioes.get(valor, 'Sul')
df['customer_region'] = df['customer_state'].map(transformar_estado)
df['seller_region'] = df['seller_state'].map(transformar_estado)
df_region_1 = df[(df['customer_region'] == 'Nordeste') | (df['customer_region'] == 'Norte') | (df['customer_region'] == 'Centro-Oeste')]
df_region_1['order_purchase_timestamp'] =
|
pd.to_datetime(df_region_1['order_purchase_timestamp'])
|
pandas.to_datetime
|
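The completion above re-parses the timestamp column with pandas.to_datetime; a minimal sketch of the conversion and the .dt.date truncation used earlier in the snippet (values are illustrative):

import pandas as pd

df = pd.DataFrame({"order_purchase_timestamp": ["2017-10-02 10:56:33", "2018-07-24 20:41:37"]})
df["order_purchase_timestamp"] = pd.to_datetime(df["order_purchase_timestamp"])
print(df["order_purchase_timestamp"].dtype)           # datetime64[ns]
dates_only = df["order_purchase_timestamp"].dt.date   # plain datetime.date objects, as used for the daily groupby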
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 15:33:46 2018
@author: <NAME>
"""
import cantera as ct
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
import pandas as pd
import numpy as np
import time
import copy
import re
import scipy.optimize
class free_flame(sim.Simulation):
    '''Child class of sim.Simulation. Inherits all attributes and methods, including __init__().
    Also has an internal init due to data requirements.'''
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,thermalBoundary,
processor:ctp.Processor=None,cti_path="",
save_physSensHistories=0,moleFractionObservables:list=[],
absorbanceObservables:list=[],concentrationObservables:list=[],
fullParsedYamlFile:dict={},flame_width:float=1.0,
save_timeHistories:int=0,T_profile=
|
pd.DataFrame(columns=['z','T'])
|
pandas.DataFrame
|
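The completion above defaults T_profile to an empty DataFrame with fixed columns; a small sketch of how such a frame can be created and later filled (the temperature values are illustrative):

import pandas as pd

T_profile = pd.DataFrame(columns=["z", "T"])  # empty frame, schema fixed up front
rows = pd.DataFrame({"z": [0.0, 0.5, 1.0], "T": [300.0, 1500.0, 1800.0]})  # illustrative profile
T_profile = pd.concat([T_profile, rows], ignore_index=True)
print(len(T_profile))  # 3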
import os
import random
import sys
import pandas as pd
from ATM import welcome
from Validate import validateDetails2, validateLogin
filePath = r".\{}.csv".format("atm")
if not os.path.isfile(filePath) or os.path.getsize(filePath) == 0:
df = pd.DataFrame({"firstName": [], "lastName": [], "email": [], "address": [], "accountNumber": [],
"password": [], "contact": []})
df.to_csv(filePath, index=False)
def generateAccountNumber():
accountNumber = random.randrange(1000000000, 9999999999)
boolAccount = validateDetails2(accountNumber=accountNumber) # Checks if account number has been used or not.
    if not boolAccount:  # Recreate the account number until an un-used one is generated
        return generateAccountNumber()  # return the recursive result; otherwise a used number could be returned
print("Account number Validated!")
return accountNumber # Returns generated un-used account number.
def register():
# To register, we need Name, Email, Account number, Password, Address, contact
print("\n" + "-" * 15 + "REGISTRATION" + "-" * 15)
welcomePrompt = "\nTake a few minutes to register an account with us.\n" \
"Please fill the following details as accurately as possible\n"
print(welcomePrompt)
firstName = input("Enter your First name: ")
lastName = input("Enter your Last name: ")
email = input("Enter your Email Address: ")
address = input("Enter your Home Address: ")
while True: # Get contact until up to 11 digits is entered
contact = input("Enter your Phone number: ")
if len(contact) == 11:
break
else:
print("Incorrect Phone number, try again.")
boolValue = validateDetails2(contact=contact, email=email) # Check if details exist
if boolValue:
print("\nGenerating Account Number...")
accountNumber = str(generateAccountNumber())
print("Your Account Number is", accountNumber)
while True:
password = input("Enter password (must be 8 digits or more): ")
if len(password) >= 8:
break
print("\nPlease take note of your account number and password.\n")
registration = {"firstName": firstName, "lastName": lastName, "email": email, "address": address,
"accountNumber": accountNumber, "password": password, "contact": contact, "Balance": 0}
data =
|
pd.read_csv(filePath, dtype=str)
|
pandas.read_csv
|
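The completion above reads the account file with pandas.read_csv and dtype=str; a sketch using an in-memory stand-in for atm.csv, showing why forcing strings matters for account numbers with leading zeros:

import io
import pandas as pd

csv_text = io.StringIO("accountNumber,password,contact\n0123456789,secretpass,08012345678\n")
data = pd.read_csv(csv_text, dtype=str)
print(data.loc[0, "accountNumber"])  # '0123456789' -- still a string, leading zero preserved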
import argparse
from typing import List
import pandas as pd
from datetime import datetime, timedelta
from gamestonk_terminal.government import quiverquant_model
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_positive,
plot_autoscale,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal import feature_flags as gtff
def last_congress(other_args: List[str]):
"""Last congress trading
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="last_congress",
description="""
Last congress trading. [Source: www.quiverquant.com]
""",
)
parser.add_argument(
"-p",
"--past_transactions_days",
action="store",
dest="past_transactions_days",
type=check_positive,
default=5,
help="Past transaction days",
)
parser.add_argument(
"-r",
"--representative",
action="store",
dest="representative",
type=str,
default="",
help="Congress representative",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df_congress = quiverquant_model.get_congress_trading()
if df_congress.empty:
print("No congress trading data found\n")
return
df_congress = df_congress.sort_values("TransactionDate", ascending=False)
df_congress = df_congress[
df_congress["TransactionDate"].isin(
df_congress["TransactionDate"].unique()[
: ns_parser.past_transactions_days
]
)
]
df_congress = df_congress[
[
"TransactionDate",
"Ticker",
"Representative",
"Transaction",
"House",
"Range",
"ReportDate",
]
].rename(
columns={"TransactionDate": "Transaction Date", "ReportDate": "Report Date"}
)
if ns_parser.representative:
df_congress_rep = df_congress[
df_congress["Representative"].str.split().str[0]
== ns_parser.representative
]
if df_congress_rep.empty:
print(
f"No representative {ns_parser.representative} found in the past {ns_parser.past_transactions_days}"
f" days. The following are available: "
f"{', '.join(df_congress['Representative'].str.split().str[0].unique())}"
)
else:
print(df_congress_rep.to_string(index=False))
else:
print(df_congress.to_string(index=False))
print("")
except Exception as e:
print(e, "\n")
def buy_congress(other_args: List[str]):
"""Top buy congress trading
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="buy_congress",
description="""
Top buy congress trading. [Source: www.quiverquant.com]
""",
)
parser.add_argument(
"-p",
"--past_transactions_months",
action="store",
dest="past_transactions_months",
type=check_positive,
default=6,
help="Past transaction months",
)
parser.add_argument(
"-t",
"--top",
action="store",
dest="top_num",
type=check_positive,
default=10,
help="Number of top tickers",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df_congress = quiverquant_model.get_congress_trading()
if df_congress.empty:
print("No congress trading data found\n")
return
df_congress = df_congress.sort_values("TransactionDate", ascending=False)
start_date = datetime.now() - timedelta(
days=ns_parser.past_transactions_months * 30
)
df_congress["TransactionDate"] = pd.to_datetime(df_congress["TransactionDate"])
df_congress = df_congress[df_congress["TransactionDate"] > start_date].dropna()
df_congress["min"] = df_congress["Range"].apply(
lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
)
df_congress["max"] = df_congress["Range"].apply(
lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
if "-" in x
else x.strip("$").replace(",", "")
)
df_congress["lower"] = df_congress[["min", "max", "Transaction"]].apply(
lambda x: float(x["min"])
if x["Transaction"] == "Purchase"
else -float(x["max"]),
axis=1,
)
df_congress["upper"] = df_congress[["min", "max", "Transaction"]].apply(
lambda x: float(x["max"])
if x["Transaction"] == "Purchase"
else -float(x["min"]),
axis=1,
)
df_congress = df_congress.sort_values("TransactionDate", ascending=True)
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
df_congress.groupby("Ticker")["upper"].sum().sort_values(ascending=False).head(
n=ns_parser.top_num
).plot(kind="bar", rot=0)
plt.ylabel("Amount [$]")
plt.title(
f"Top {ns_parser.top_num} most bought stocks since last {ns_parser.past_transactions_months} "
"months (upper bound)"
)
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
if gtff.USE_ION:
plt.ion()
plt.show()
print("")
except Exception as e:
print(e, "\n")
def sell_congress(other_args: List[str]):
"""Top sell congress trading
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="sell_congress",
description="""
Top sell congress trading. [Source: www.quiverquant.com]
""",
)
parser.add_argument(
"-p",
"--past_transactions_months",
action="store",
dest="past_transactions_months",
type=check_positive,
default=6,
help="Past transaction months",
)
parser.add_argument(
"-t",
"--top",
action="store",
dest="top_num",
type=check_positive,
default=10,
help="Number of top tickers",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df_congress = quiverquant_model.get_congress_trading()
if df_congress.empty:
print("No congress trading data found\n")
return
df_congress = df_congress.sort_values("TransactionDate", ascending=False)
start_date = datetime.now() - timedelta(
days=ns_parser.past_transactions_months * 30
)
df_congress["TransactionDate"] = pd.to_datetime(df_congress["TransactionDate"])
df_congress = df_congress[df_congress["TransactionDate"] > start_date].dropna()
df_congress["min"] = df_congress["Range"].apply(
lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
)
df_congress["max"] = df_congress["Range"].apply(
lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
if "-" in x
else x.strip("$").replace(",", "")
)
df_congress["lower"] = df_congress[["min", "max", "Transaction"]].apply(
lambda x: float(x["min"])
if x["Transaction"] == "Purchase"
else -float(x["max"]),
axis=1,
)
df_congress["upper"] = df_congress[["min", "max", "Transaction"]].apply(
lambda x: float(x["max"])
if x["Transaction"] == "Purchase"
else -float(x["min"]),
axis=1,
)
df_congress = df_congress.sort_values("TransactionDate", ascending=True)
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
df_congress.groupby("Ticker")["lower"].sum().sort_values().abs().head(
n=ns_parser.top_num
).plot(kind="bar", rot=0)
plt.ylabel("Amount [$]")
plt.title(
f"Top {ns_parser.top_num} most sold stocks since last {ns_parser.past_transactions_months} months"
" (upper bound)"
)
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
if gtff.USE_ION:
plt.ion()
plt.show()
print("")
except Exception as e:
print(e, "\n")
def plot_congress(congress: pd.DataFrame, ticker: str):
"""Plot congress trading
Parameters
----------
    congress: pd.DataFrame
        DataFrame with the congress trading data to plot
ticker: str
Ticker to plot congress trading
"""
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
plt.gca().fill_between(
congress["TransactionDate"].unique(),
congress.groupby("TransactionDate")["lower"].sum().values / 1000,
congress.groupby("TransactionDate")["upper"].sum().values / 1000,
)
plt.xlim(
[congress["TransactionDate"].values[0], congress["TransactionDate"].values[-1]]
)
plt.grid()
plt.title(f"Congress trading on {ticker}")
plt.xlabel("Date")
plt.ylabel("Amount [1k $]")
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gcf().autofmt_xdate()
if gtff.USE_ION:
plt.ion()
plt.show()
def congress(other_args: List[str], ticker: str):
"""Congress trading
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
ticker: str
Ticker to get congress trading data from
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="congress",
description="""
Congress trading. [Source: www.quiverquant.com]
""",
)
parser.add_argument(
"-p",
"--past_transactions_months",
action="store",
dest="past_transactions_months",
type=check_positive,
default=6,
help="Past transaction months",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df_congress = quiverquant_model.get_congress_trading(ticker)
if df_congress.empty:
print("No congress trading data found\n")
return
df_congress = df_congress.sort_values("TransactionDate", ascending=False)
start_date = datetime.now() - timedelta(
days=ns_parser.past_transactions_months * 30
)
df_congress["TransactionDate"] =
|
pd.to_datetime(df_congress["TransactionDate"])
|
pandas.to_datetime
|
import pandas as pd
import logging
import numpy as np
import collections
import configparser
import shutil
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import requests
import io
from astropy.io import fits
from astropy.time import Time
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import sphere
import sphere.utils as utils
import sphere.toolbox as toolbox
_log = logging.getLogger(__name__)
# WFS wavelength
wave_wfs = 500e-9
class Reduction(object):
'''
SPHERE/SPARTA dataset reduction class
The analysis and plotting code of this class was originally
developed by <NAME> (ESO/IPAG) and based on SAXO tools
from Jean-<NAME> (ONERA). See:
https://github.com/jmilou/sparta
for the code from <NAME>.
'''
##################################################
# Class variables
##################################################
# specify for each recipe which other recipes need to have been executed before
recipe_requirements = collections.OrderedDict([
('sort_files', []),
('sph_sparta_dtts', ['sort_files']),
('sph_sparta_wfs_parameters', ['sort_files']),
('sph_sparta_atmospheric_parameters', ['sort_files']),
('sph_query_databases', ['sort_files']),
('sph_sparta_plot', ['sort_files', 'sph_sparta_dtts', 'sph_sparta_wfs_parameters', 'sph_sparta_atmospheric_parameters']),
('sph_sparta_clean', [])
])
##################################################
# Constructor
##################################################
def __new__(cls, path, log_level='info', sphere_handler=None):
'''
Custom instantiation for the class
The customized instantiation enables to check that the
provided path is a valid reduction path. If not, None will be
returned for the reduction being created. Otherwise, an
instance is created and returned at the end.
Parameters
----------
path : str
Path to the directory containing the dataset
level : {'debug', 'info', 'warning', 'error', 'critical'}
The log level of the handler
sphere_handler : log handler
Higher-level SPHERE.Dataset log handler
'''
#
# make sure we are dealing with a proper reduction directory
#
# init path
path = Path(path).expanduser().resolve()
# zeroth-order reduction validation
raw = path / 'raw'
if not raw.exists():
_log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
return None
else:
reduction = super(Reduction, cls).__new__(cls)
#
# basic init
#
# init path
reduction._path = utils.ReductionPath(path)
# instrument and mode
reduction._instrument = 'SPARTA'
#
# logging
#
logger = logging.getLogger(str(path))
logger.setLevel(log_level.upper())
if logger.hasHandlers():
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
if sphere_handler:
logger.addHandler(sphere_handler)
reduction._logger = logger
reduction._logger.info('Creating SPARTA reduction at path {}'.format(path))
#
# configuration
#
reduction._logger.debug('> read default configuration')
configfile = f'{Path(sphere.__file__).parent}/instruments/{reduction._instrument}.ini'
config = configparser.ConfigParser()
reduction._logger.debug('Read configuration')
config.read(configfile)
# reduction parameters
reduction._config = dict(config.items('reduction'))
for key, value in reduction._config.items():
try:
val = eval(value)
except NameError:
val = value
reduction._config[key] = val
#
# reduction and recipe status
#
reduction._status = sphere.INIT
reduction._recipes_status = collections.OrderedDict()
for recipe in reduction.recipe_requirements.keys():
reduction._update_recipe_status(recipe, sphere.NOTSET)
# reload any existing data frames
reduction._read_info()
reduction._logger.warning('#########################################################')
reduction._logger.warning('# WARNING! #')
reduction._logger.warning('# Support for SPARTA files is preliminary. The current #')
reduction._logger.warning('# format of product files may change in future versions #')
reduction._logger.warning('# of the pipeline until an appropriate format is found. #')
reduction._logger.warning('# Please do not blindly rely on the current format. #')
reduction._logger.warning('#########################################################')
#
# return instance
#
return reduction
##################################################
# Representation
##################################################
def __repr__(self):
return '<Reduction, instrument={}, path={}, log={}>'.format(self._instrument, self._path, self.loglevel)
    def __format__(self, format_spec):
return self.__repr__()
##################################################
# Properties
##################################################
@property
def loglevel(self):
return logging.getLevelName(self._logger.level)
@loglevel.setter
def loglevel(self, level):
self._logger.setLevel(level.upper())
@property
def instrument(self):
return self._instrument
@property
def path(self):
return self._path
@property
def files_info(self):
return self._files_info
@property
def dtts_info(self):
return self._dtts_info
@property
def visloop_info(self):
return self._visloop_info
@property
def irloop_info(self):
return self._irloop_info
@property
def atmospheric_info(self):
return self._atmos_info
@property
def recipe_status(self):
return self._recipes_status
@property
def config(self):
return self._config
@property
def status(self):
return self._status
##################################################
# Private methods
##################################################
def _read_info(self):
'''
Read the files, calibs and frames information from disk
files_info : dataframe
The data frame with all the information on files
This function is not supposed to be called directly by the user.
'''
self._logger.info('Read existing reduction information')
# path
path = self.path
# files info
fname = path.preproc / 'files.csv'
if fname.exists():
self._logger.debug('> read files.csv')
files_info = pd.read_csv(fname, index_col=0)
# convert times
files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False)
files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False)
# update recipe execution
self._update_recipe_status('sort_files', sphere.SUCCESS)
else:
files_info = None
# DTTS info
fname = path.products / 'dtts_frames.csv'
if fname.exists():
self._logger.debug('> read dtts_frames.csv')
dtts_info = pd.read_csv(fname, index_col=0)
# convert times
dtts_info['DATE-OBS'] = pd.to_datetime(dtts_info['DATE-OBS'], utc=False)
dtts_info['DATE'] = pd.to_datetime(dtts_info['DATE'], utc=False)
dtts_info['TIME'] =
|
pd.to_datetime(dtts_info['TIME'], utc=False)
|
pandas.to_datetime
|
"""
Code Contributed by --
<NAME>
"""
import re
import pandas as pd
# this script generates a new data file that contains all of the cleaned data
datas =
|
pd.read_csv("data.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
import camelot
from test_data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_stream():
pass
def test_stream_table_rotated():
df = pd.DataFrame(data_stream_table_rotated)
filename = os.path.join(testdir, "clockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert df.equals(tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert df.equals(tables[0].df)
def test_stream_table_area():
df = pd.DataFrame(data_stream_table_area_single)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(filename, flavor="stream", table_area=["320,500,573,335"])
assert df.equals(tables[0].df)
def test_stream_columns():
df = pd.DataFrame(data_stream_columns)
filename = os.path.join(testdir, "mexican_towns.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", columns=["67,180,230,425,475"], row_close_tol=10)
assert df.equals(tables[0].df)
def test_lattice():
df =
|
pd.DataFrame(data_lattice)
|
pandas.DataFrame
|
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import json
import pandas as pd
import numpy as np
import colorlover as cl
import plotly
import os
import pdb
import itertools
app = dash.Dash()
app.scripts.config.serve_locally = True
profile_file = os.path.expanduser("~/.snap/profile.pkl")
profiles =
|
pd.read_pickle(profile_file)
|
pandas.read_pickle
|
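The completion above loads the cached profiles with pandas.read_pickle; a round-trip sketch with an illustrative frame and path (the real file lives under ~/.snap/profile.pkl):

import pandas as pd

profiles = pd.DataFrame({"name": ["a", "b"], "score": [1.0, 2.0]})  # illustrative content
profiles.to_pickle("/tmp/profile.pkl")           # illustrative path
restored = pd.read_pickle("/tmp/profile.pkl")
assert restored.equals(profiles)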
# -*- coding: utf-8 -*-
"""
Bridge to the pandas library.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import pandas as pd
import quantities as pq
from elephant.neo_tools import (extract_neo_attrs, get_all_epochs,
get_all_events, get_all_spiketrains)
def _multiindex_from_dict(inds):
"""Given a dictionary, return a `pandas.MultiIndex`.
Parameters
----------
inds : dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
Returns
-------
pandas MultiIndex
"""
names, indexes = zip(*sorted(inds.items()))
return pd.MultiIndex.from_tuples([indexes], names=names)
def _sort_inds(obj, axis=0):
"""Put the indexes and index levels of a pandas object in sorted order.
    Parameters
    ----------
obj : pandas Series, DataFrame, Panel, or Panel4D
The object whose indexes should be sorted.
axis : int, list, optional, 'all'
The axis whose indexes should be sorted. Default is 0.
Can also be a list of indexes, in which case all of those axes
are sorted. If 'all', sort all indexes.
Returns
-------
pandas Series, DataFrame, Panel, or Panel4D
        A copy of the object with its indexes sorted.
"""
if axis == 'all':
return _sort_inds(obj, axis=range(obj.ndim))
if hasattr(axis, '__iter__'):
for iax in axis:
obj = _sort_inds(obj, iax)
return obj
obj = obj.reorder_levels(sorted(obj.axes[axis].names), axis=axis)
return obj.sortlevel(0, axis=axis, sort_remaining=True)
def _extract_neo_attrs_safe(obj, parents=True, child_first=True):
"""Given a neo object, return a dictionary of attributes and annotations.
This is done in a manner that is safe for `pandas` indexes.
Parameters
----------
obj : neo object
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
"""
res = extract_neo_attrs(obj, skip_array=True, skip_none=True,
parents=parents, child_first=child_first)
for key, value in res.items():
res[key] = _convert_value_safe(value)
key2 = _convert_value_safe(key)
if key2 is not key:
res[key2] = res.pop(key)
return res
def _convert_value_safe(value):
"""Convert `neo` values to a value compatible with `pandas`.
Some types and dtypes used with neo are not safe to use with pandas in some
or all situations.
    `quantities.Quantity` objects don't follow the normal Python rule that
    values which compare equal should have the same hash, making them
    fundamentally incompatible with `pandas`.
On python 3, `pandas` coerces `S` dtypes to bytes, which are not always
safe to use.
Parameters
----------
value : any
Value to convert (if it has any known issues).
Returns
-------
any
`value` or a version of value with potential problems fixed.
"""
if hasattr(value, 'dimensionality'):
return (value.magnitude.tolist(), str(value.dimensionality))
if hasattr(value, 'dtype') and value.dtype.kind == 'S':
return value.astype('U').tolist()
if hasattr(value, 'tolist'):
return value.tolist()
if hasattr(value, 'decode') and not hasattr(value, 'encode'):
return value.decode('UTF8')
return value
def spiketrain_to_dataframe(spiketrain, parents=True, child_first=True):
"""Convert a `neo.SpikeTrain` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the spike time converted to a `float` value in seconds.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the spike number.
Parameters
----------
spiketrain : neo SpikeTrain
The SpikeTrain to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
Returns
-------
pandas DataFrame
A DataFrame containing the spike times from `spiketrain`.
Notes
-----
The index name is `spike_number`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(spiketrain,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = spiketrain.magnitude
times = pq.Quantity(times, spiketrain.units).rescale('s').magnitude
times = times[np.newaxis].T
index = pd.Index(np.arange(len(spiketrain)), name='spike_number')
pdobj = pd.DataFrame(times, index=index, columns=columns)
return _sort_inds(pdobj, axis=1)
def event_to_dataframe(event, parents=True, child_first=True):
"""Convert a `neo.core.Event` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the event label from the `event.label` attribute.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the time stamp from the `event.times` attribute.
Parameters
----------
event : neo Event
The Event to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `event`.
Notes
-----
If the length of event.times and event.labels are not the same,
the longer will be truncated to the length of the shorter.
The index name is `times`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(event,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = event.times.rescale('s').magnitude
labels = event.labels.astype('U')
times = times[:len(labels)]
labels = labels[:len(times)]
index =
|
pd.Index(times, name='times')
|
pandas.Index
|
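The completion above wraps the event times in a named pandas.Index so they become the DataFrame's index; a minimal sketch with illustrative times and labels:

import numpy as np
import pandas as pd

times = np.array([0.1, 0.25, 0.4])  # illustrative event times in seconds
index = pd.Index(times, name="times")
labels_df = pd.DataFrame({"label": ["start", "stim", "stop"]}, index=index)
print(labels_df.index.name)  # 'times'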
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/18 17:03
Desc: Investing.com - stock indices - data interface for global stock indices and index futures
https://cn.investing.com/indices/volatility-s-p-500-historical-data
"""
import re
import pandas as pd
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
from akshare.utils.ak_session import session
def _get_global_index_country_name_url() -> dict:
"""
    Global indices - global index data for each country
https://cn.investing.com/indices/global-indices?majorIndices=on&primarySectors=on&bonds=on&additionalIndices=on&otherIndices=on&c_id=37
    :return: mapping of country name to country code
:rtype: dict
"""
url = "https://cn.investing.com/indices/global-indices"
params = {
"majorIndices": "on",
"primarySectors": "on",
"bonds": "on",
"additionalIndices": "on",
"otherIndices": "on",
}
r = session.get(url, params=params, headers=short_headers)
data_text = r.text
soup = BeautifulSoup(data_text, "lxml")
name_url_option_list = soup.find_all("option")[1:]
url_list = [
item["value"]
for item in name_url_option_list
if "c_id" in item["value"]
]
url_list_code = [
item["value"].split("?")[1].split("=")[1]
for item in name_url_option_list
if "c_id" in item["value"]
]
name_list = [item.get_text() for item in name_url_option_list][
: len(url_list)
]
_temp_df = pd.DataFrame([name_list, url_list_code]).T
name_code_list = dict(zip(_temp_df.iloc[:, 0], _temp_df.iloc[:, 1]))
return name_code_list
def _get_global_country_name_url() -> dict:
"""
    URLs for the countries whose index data is available
:return: URL
:rtype: dict
"""
url = "https://cn.investing.com/indices/"
res = session.post(url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
name_url_option_list = soup.find(
"select", attrs={"name": "country"}
).find_all("option")[
1:
    ]  # drop the first option: "- All countries and regions"
url_list = [item["value"] for item in name_url_option_list]
name_list = [item.get_text() for item in name_url_option_list]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, url_list))
return name_code_map_dict
def index_investing_global_country_name_url(country: str = "中国") -> dict:
"""
    Reference page: https://cn.investing.com/indices/
    Get, for the selected country, its major indices, primary sectors, additional indices and other indices
    :param country: str, Chinese country name, matching the names returned by get_global_country_name_url
:return: dict
"""
pd.set_option("mode.chained_assignment", None)
name_url_dict = _get_global_country_name_url()
name_code_dict = _get_global_index_country_name_url()
url = f"https://cn.investing.com{name_url_dict[country]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
res = session.post(url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
url_list = [
item.find("a")["href"]
for item in soup.find_all(attrs={"class": "plusIconTd"})
]
name_list = [
item.find("a").get_text()
for item in soup.find_all(attrs={"class": "plusIconTd"})
]
name_code_map_dict = {}
name_code_map_dict.update(zip(name_list, url_list))
url = "https://cn.investing.com/indices/global-indices"
params = {
"majorIndices": "on",
"primarySectors": "on",
"bonds": "on",
"additionalIndices": "on",
"otherIndices": "on",
"c_id": name_code_dict[country],
}
r = session.get(url, params=params, headers=short_headers)
data_text = r.text
soup = BeautifulSoup(data_text, "lxml")
soup_list = soup.find("table", attrs={"id": "cr_12"}).find_all("a")
global_index_url = [item["href"] for item in soup_list]
global_index_name = [item["title"] for item in soup_list]
name_code_map_dict.update(zip(global_index_name, global_index_url))
return name_code_map_dict
def index_investing_global(
country: str = "美国",
index_name: str = "纳斯达克100",
period: str = "每日",
start_date: str = "20100101",
end_date: str = "20211031",
) -> pd.DataFrame:
"""
    Data for a specific index of a specific country between start_date and end_date
    :param country: country name as used by the country-name functions in this module
    :type country: str
    :param index_name: index name as used by the country-name functions in this module
    :type index_name: str
    :param period: choice of {"每日", "每周", "每月"}
    :type period: str
    :param start_date: e.g. '20100101'; note the format
    :type start_date: str
    :param end_date: e.g. '20211031'; note the format
    :type end_date: str
    :return: data for the specified parameters
:rtype: pandas.DataFrame
"""
start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
name_code_dict = index_investing_global_country_name_url(country)
temp_url = f"https://cn.investing.com/{name_code_dict[index_name]}-historical-data"
res = session.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
res = session.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[
0
].strip()
para_data = re.findall(r"\d+", data)
payload = {
"curr_id": para_data[0],
"smlID": para_data[1],
"header": title,
"st_date": start_date,
"end_date": end_date,
"interval_sec": period_map[period],
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
}
url = "https://cn.investing.com/instruments/HistoricalDataAjax"
res = session.post(url, data=payload, headers=long_headers)
df_data = pd.read_html(res.text)[0]
if period == "每月":
df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月")
else:
df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
if any(df_data["交易量"].astype(str).str.contains("-")):
df_data["交易量"][df_data["交易量"].str.contains("-")] = df_data["交易量"][
df_data["交易量"].str.contains("-")
].replace("-", 0)
if any(df_data["交易量"].astype(str).str.contains("B")):
df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)]
.str.replace("B", "")
.str.replace(",", "")
.astype(float)
* 1000000000
)
if any(df_data["交易量"].astype(str).str.contains("M")):
df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)]
.str.replace("M", "")
.str.replace(",", "")
.astype(float)
* 1000000
)
if any(df_data["交易量"].astype(str).str.contains("K")):
df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)]
.str.replace("K", "")
.str.replace(",", "")
.astype(float)
* 1000
)
df_data["交易量"] = df_data["交易量"].astype(float)
df_data = df_data[["收盘", "开盘", "高", "低", "交易量"]]
df_data = df_data.astype(float)
df_data.sort_index(inplace=True)
df_data.reset_index(inplace=True)
df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
return df_data
def index_investing_global_from_url(
url: str = "https://www.investing.com/indices/ftse-epra-nareit-eurozone",
period: str = "每日",
start_date: str = "20000101",
end_date: str = "20210909",
) -> pd.DataFrame:
"""
    Data for a specific index between start_date and end_date
    https://www.investing.com/indices/ftse-epra-nareit-eurozone
    :param url: URL of the index's historical-data page
    :type url: str
    :param period: choice of {"每日", "每周", "每月"}
    :type period: str
    :param start_date: e.g. '20000101'; note the format
    :type start_date: str
    :param end_date: e.g. '20210909'; note the format
    :type end_date: str
    :return: data for the specified parameters
:rtype: pandas.DataFrame
"""
start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
url_name = url.split("/")[-1]
temp_url = f"https://cn.investing.com/indices/{url_name}-historical-data"
res = session.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
res = session.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[
0
].strip()
para_data = re.findall(r"\d+", data)
payload = {
"curr_id": para_data[0],
"smlID": para_data[1],
"header": title,
"st_date": start_date,
"end_date": end_date,
"interval_sec": period_map[period],
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
}
url = "https://cn.investing.com/instruments/HistoricalDataAjax"
res = session.post(url, data=payload, headers=long_headers)
df_data =
|
pd.read_html(res.text)
|
pandas.read_html
|
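The completion above parses the AJAX response with pandas.read_html, which returns one DataFrame per <table> in the markup; a tiny stand-in table (requires lxml or html5lib to be installed):

import io
import pandas as pd

html = "<table><tr><th>日期</th><th>收盘</th></tr><tr><td>2021年10月29日</td><td>15,850</td></tr></table>"
df_data = pd.read_html(io.StringIO(html))[0]
print(df_data.columns.tolist())  # ['日期', '收盘']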
import base64
import io
import json
from collections import namedtuple
from datetime import date
from typing import Union
import pandas as pd
from django.db.models import Q
from apps.ecommerce.models import Order
from ..organizations.models import Organization
from ..organizations.permissions import check_user_membership
from .models import Category, Event, SignUp
DEFAULT_REPORT_FIELDS = {
"signup_timestamp",
"event_title",
"user_first_name",
"user_last_name",
"signup_user_grade_year",
"signup_user_email",
"signup_user_phone_number",
"user_allergies",
"attendance_status",
"order_timestamp",
"has_paid",
"order_id",
"order_quantity",
"order_total_price",
}
FiletypeSpec = namedtuple("FiletypeSpec", ["content_type", "extension"])
filetype_specs = {
"xlsx": FiletypeSpec(
content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
extension="xlsx",
),
"csv": FiletypeSpec(content_type="text/csv", extension="csv"),
"html": FiletypeSpec(content_type="text/html", extension="html"),
}
class EventResolvers:
def resolve_all_events(self, info, category=None, organization=None, start_time=None, end_time=None):
"""
Get all events that fit the given filters
"""
if category or organization or start_time or end_time:
filteredEvents = Event.objects
if start_time and end_time:
filteredEvents = filteredEvents.filter(start_time__range=(start_time, end_time))
elif start_time:
filteredEvents = filteredEvents.filter(start_time__gte=(start_time))
elif end_time:
filteredEvents = filteredEvents.filter(start_time__lte=(end_time))
queries = []
kwargs = {}
if category is not None:
kwargs["category__name"] = category
# For generalization, if more filters are added later
new_kwargs = {f"{k}__icontains": v for k, v in kwargs.items()}
queries = [Q(**{k: v}) for k, v in new_kwargs.items()]
if organization: # for organizations, check if the organization argument corresponds to either
queries.append( # the organization of the event itself and the parent organization (if it exists)
Q(organization__name__icontains=organization)
| Q(organization__parent__name__icontains=organization)
)
return (
filteredEvents.filter(*queries)
.filter(start_time__gte=date.today()) # Only show events that have yet to pass
.order_by("start_time")
)
return Event.objects.filter(start_time__gte=date.today()).order_by("start_time")
def resolve_default_events(self, info):
"""
For each organization, get the most recent (future) event
"""
return Event.objects.filter(start_time__gte=date.today()).distinct("organization")
def resolve_event(self, info, id):
try:
return Event.objects.get(id=id)
except Event.DoesNotExist:
return None
def resolve_all_categories(self, info):
return Category.objects.all()
def resolve_category(self, info, id):
try:
return Category.objects.get(id=id)
except Category.DoesNotExist:
return None
def resolve_attendee_report(self, info, event_id, fields=None, filetype="xlsx"):
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
df = create_attendee_report([event_id], fields)
file_basename = f"attendee_report__eventid_{event_id}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_attendee_reports(self, info, event_ids, fields=None, filetype="xlsx"):
for event_id in event_ids:
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
df = create_attendee_report(event_ids, fields)
file_basename = f"attendee_report__eventid_{'|'.join(str(id_) for id_ in event_ids)}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_attendee_report_org(self, info, org_id, fields=None, filetype="xlsx"):
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return None
check_user_membership(info.context.user, org)
event_ids = Organization.objects.get(id=org_id).events.values_list("id", flat=True)
df = create_attendee_report(event_ids, fields)
file_basename = f"attendee_report__orgid_{org_id}"
return wrap_attendee_report_as_json(df, file_basename, filetype)
def resolve_sign_ups(self, info, event_id):
try:
event = Event.objects.get(id=event_id)
except Event.DoesNotExist:
return None
check_user_membership(info.context.user, event.organization)
return SignUp.objects.filter(event=event)
def export_single_event(event_id: int, fields: Union[list[str], set[str]]) -> pd.DataFrame:
event: Event = Event.objects.get(pk=event_id)
attending_users = event.signed_up_users[: event.available_slots]
wait_list = event.signed_up_users[event.available_slots :]
sign_ups = SignUp.objects.filter(event_id=event_id, is_attending=True)
df_users = pd.DataFrame(
columns=[
"user_first_name",
"user_last_name",
"user_allergies",
]
)
if attending_users.exists():
df_users_attending = pd.DataFrame(attending_users.values()).set_index("id").add_prefix("user_")
df_users_attending["attendance_status"] = "ATTENDING"
df_users = pd.concat([df_users, df_users_attending])
if wait_list.exists():
df_users_wait_list = pd.DataFrame(wait_list.values()).set_index("id").add_prefix("user_")
df_users_wait_list["attendance_status"] = "WAIT LIST"
df_users = pd.concat([df_users, df_users_wait_list])
if event.products.exists():
product = event.products.first()
orders = Order.objects.filter(product=product)
df_orders = pd.DataFrame(orders.values()).set_index("user_id").add_prefix("order_")
df_users = df_users.join(df_orders)
payment_successful = df_users["order_payment_status"] == Order.PaymentStatus.CAPTURED
df_users["has_paid"] = payment_successful
df_sign_ups = (
pd.DataFrame(sign_ups.order_by("timestamp").values())
.add_prefix("signup_")
.rename(columns={"signup_event_id": "event_id", "signup_user_id": "user_id"})
)
df_joined = df_sign_ups.join(df_users, on="user_id").sort_values(["event_id", "user_id"])
df_joined["event_title"] = event.title
if df_joined.empty:
return pd.DataFrame()
report_fields = list(DEFAULT_REPORT_FIELDS.intersection(df_joined.columns))
fields = set(fields).intersection(report_fields) if fields is not None else report_fields
return df_joined.loc[:, report_fields].drop("password", errors="ignore", axis=1).loc[:, fields]
def create_attendee_report(event_ids, fields):
df = pd.DataFrame()
for event_id in event_ids:
df = pd.concat([df, export_single_event(event_id, fields)])
return df
def wrap_attendee_report_as_json(df, file_basename, filetype):
# Handle different content types
if filetype == "xlsx":
if "signup_timestamp" in df:
df["signup_timestamp"] = df["signup_timestamp"].apply(lambda a: pd.to_datetime(a).tz_localize(None))
if "order_timestamp" in df:
df["order_timestamp"] = df["order_timestamp"].apply(lambda a:
|
pd.to_datetime(a)
|
pandas.to_datetime
|
import collections
import threading
import gc
import traceback
import pandas as pd
import numpy as np
from optable.dataset import feature_types
from optable import _core
class Table(object):
"""avalble for only automl data frame
"""
def __init__(self, df, time_col=None, label_encoders={}, min_time=None):
self.__df = df
self.__time_col = time_col
self.__min_time = min_time
self.__cache = {}
self.__pseudo_target = None
self.__adversarial_true_count = None
self.__adversarial_total_count = None
self.__new_data = {}
if self.__time_col is not None:
time_data = self.__df[self.__time_col]
time_data.index = range(len(time_data))
if min_time is None:
raise ValueError("min_time is None")
time_data = time_data - min_time
time_data = time_data.astype(int).values
time_data = time_data / 1e9
second_time_data = time_data.astype(int)
minute_time_data = second_time_data // 60
hour_time_data = minute_time_data // 60
day_time_data = hour_time_data // 24
second_time_data = second_time_data.astype(np.float32)
minute_time_data = minute_time_data.astype(np.float32)
hour_time_data = hour_time_data.astype(np.float32)
day_time_data = day_time_data.astype(np.float32)
time_data = time_data.astype(np.float32)
"""
time_data[time_data < 0] = np.nan
second_time_data[second_time_data < 0] = np.nan
minute_time_data[minute_time_data < 0] = np.nan
hour_time_data[hour_time_data < 0] = np.nan
day_time_data[day_time_data < 0] = np.nan
"""
self.__time_data = time_data
self.__second_time_data = second_time_data
self.__minute_time_data = minute_time_data
self.__hour_time_data = hour_time_data
self.__day_time_data = day_time_data
self.__sorted_time_index = \
np.argsort(time_data).astype(np.int32)
else:
self.__sorted_time_index = None
self.__hist_time_data = None
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
self.__label_encoders = label_encoders
self.__tfidf_vectorizers = {}
self.__preprocess()
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
self.__nunique = pd.Series(
[self.__df[col].nunique() for col in self.__df],
self.__df.dtypes.index)
self.__set_new_data_lock = threading.Lock()
@property
def ftypes(self):
return self.__ftypes
@property
def df(self):
return self.__df
@property
def sorted_time_index(self):
return self.__sorted_time_index
@property
def time_data(self):
return self.__time_data
@property
def second_time_data(self):
return self.__second_time_data
@property
def minute_time_data(self):
return self.__minute_time_data
@property
def hour_time_data(self):
return self.__hour_time_data
@property
def day_time_data(self):
return self.__day_time_data
@property
def has_time(self):
if self.__time_col is None:
return False
return True
def get_lightgbm_df(self, max_cat_nunique=30):
columns = []
col_idx = []
cat_idx = []
idx = 0
lightgbm_feature_types = [
feature_types.numerical,
feature_types.categorical,
feature_types.mc_processed_numerical,
feature_types.c_processed_numerical,
feature_types.t_processed_numerical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
feature_types.aggregate_processed_numerical,
feature_types.aggregate_processed_categorical
]
cat_feature_types = [
feature_types.categorical,
feature_types.aggregate_processed_categorical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
]
for col_i, col in enumerate(self.__df.columns):
for ftype in lightgbm_feature_types:
if col.startswith(ftype.prefix):
if ftype in cat_feature_types:
if self.__nunique[col] <= max_cat_nunique:
cat_idx.append(idx)
columns.append(col)
col_idx.append(col_i)
idx += 1
else:
columns.append(col)
col_idx.append(col_i)
idx += 1
break
return self.__df.take(col_idx, axis=1, is_copy=False), cat_idx
def set_ftypes(self, ftypes):
if isinstance(ftypes, list):
self.__ftypes[:] = ftypes
elif isinstance(ftypes, dict):
for k in ftypes:
self.__ftypes[k] = ftypes[k]
@property
def nunique(self):
return self.__nunique
def set_new_data(self, data, name):
self.__set_new_data_lock.acquire()
if name in self.__df.columns or name in self.__new_data:
print("duplicated", name)
try:
self.__new_data[name] = data
except Exception as e:
print(name)
traceback.print_exc()
finally:
self.__set_new_data_lock.release()
@property
def new_data_size(self):
return len(self.__new_data)
def get_new_data(self):
cat_feature_types = [
feature_types.categorical,
feature_types.aggregate_processed_categorical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
]
is_cat = [
feature_types.column_name_to_ftype(key)
in cat_feature_types for key in self.__new_data]
return [self.__new_data[key] for key in self.__new_data], is_cat
def clear_new_data(self):
self.__new_data = {}
def confirm_new_data(self):
new_df = pd.DataFrame(self.__new_data)
for name in self.__new_data:
prefix = "{}_".format(name.split("_")[0])
self.__ftypes[name] = feature_types.prefix_to_ftype[prefix]
self.__nunique[name] = new_df[name].nunique()
self.__new_data = {}
gc.collect()
self.__df = pd.concat([self.__df, new_df], axis=1)
gc.collect()
def test_concat(self, test_df):
pass
def __preprocess(self):
cols_of_each_ftype = self.cols_of_each_ftype
        # numerical columns with a low nunique are converted to categorical (block below is disabled)
"""
if len(self.__df) > 1000:
columns = self.__df.columns
for col in columns:
if self.__ftypes[col] == feature_types.numerical:
if self.__df[col].nunique() <= 10:
self.__df["{}{}".format(
feature_types.categorical.prefix, col,
)] = self.__df[col].astype(str)
self.__df.drop(col, axis=1, inplace=True)
print("numerical {} change to categorical".format(col))
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
"""
import time
new_data = {}
columns = self.__df.columns
for col in columns:
start = time.time()
if self.__ftypes[col] == feature_types.time:
# Time preprocess
self.__df[col] = pd.to_datetime(self.__df[col])
"""
# time numericalize
if self.__min_time is not None:
self.__df["{}numericalized_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = ((self.__df[col] - self.__min_time).astype(int)
/ 1e9).astype(np.float32)
else:
self.__df["{}numericalized_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = (self.__df[col].astype(int)
/ 1e9).astype(np.float32)
"""
max_min_time_diff = self.__df[col].max() - self.__df[col].min()
# time hour
if max_min_time_diff > pd.Timedelta('2 hours'):
new_data["{}hour_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.hour.values.astype(np.float32)
# time year
if max_min_time_diff > pd.Timedelta('500 days'):
new_data["{}year_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.year.values.astype(np.float32)
# time doy
if max_min_time_diff > pd.Timedelta('100 days'):
new_data["{}doy_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.dayofyear.values.astype(np.float32)
# time dow
if max_min_time_diff > pd.Timedelta('2 days'):
new_data["{}dow_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.dayofweek.values.astype(np.float32)
# weekend
if max_min_time_diff > pd.Timedelta('2 days'):
new_data["{}id_weekend_{}".format(
feature_types.t_processed_categorical.prefix, col,
)] = (self.__df[col].dt.dayofweek >= 5).astype(np.int32)
# time zone
if max_min_time_diff >
|
pd.Timedelta('8 hours')
|
pandas.Timedelta
|
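The completion above compares the span of a datetime column against pandas.Timedelta thresholds to decide which time features to generate; a minimal sketch with illustrative timestamps:

import pandas as pd

times = pd.to_datetime(["2019-01-01 00:00", "2019-01-01 09:30"])
span = times.max() - times.min()
print(span > pd.Timedelta("8 hours"))   # True  (9.5 hours exceeds the 8-hour threshold)
print(span > pd.Timedelta("500 days"))  # False (so, per the snippet's thresholds, no year feature is added)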
from flask import Flask
from flask import render_template
from flask import request
#from flask_wtf import CsrfProtect #this is what the video said, but it gives a warning; the import on the line below works without warnings
from flask_wtf import CSRFProtect
import forms
from flask import make_response #for the cookie
from flask import session
from flask import url_for
from flask import redirect
from flask import flash
from flask import g #to allow global variables; they live until the end of after_request, i.e. until the response is returned.
#A value in g only lives for a single request. Two clients cannot share the same global variable.
from config import DevelopmentConfig
from models import db
from models import userstest #name of the MODEL to import, not the table name
#from models import Comment in CF
from models import comments
from helper import date_format
from flask_mail import Mail
from flask_mail import Message
import threading #to send the emails in the background so the app is faster
from flask import copy_current_request_context
#after CF
from models import vendors, rfielements_providers, rfielements_analysts, suitemodcatelem, vendors_rfi, suitemodules, suitemodcat, category_names, element_names, elementvariants, current_quarteryear, users
#####Just for creating raw MySQL queries########
from sqlalchemy import create_engine
eng = create_engine(DevelopmentConfig.SQLALCHEMY_DATABASE_URI)
################################################
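# Hedged sketch (not part of the original app): one way the raw engine above could be
# used together with pandas. The table name 'comments' is an assumption based on the
# models imported below; pandas is imported later in this module, so the local import
# keeps the sketch self-contained.
def example_raw_query():
    import pandas as pd
    # pd.read_sql accepts a SQLAlchemy engine as `con`
    return pd.read_sql("SELECT * FROM comments LIMIT 5", con=eng)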
import sys
import urllib.parse #to encode urls
import pandas as pd
pd.set_option('display.expand_frame_repr', False) #just to make the print of pandas wider
from sqlalchemy import desc, func, and_, or_
import json
from helper import previous_quarter_year, next_quarter_year, last_self_score, last_sm_score
import numpy as np
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_admin import BaseView, expose
app = Flask(__name__)
app.config.from_object(DevelopmentConfig) #here is where we decide if Development or Production.
#before using config:
#app.secret_key = 'my_secret_key' #though it is good practice to read the secret key with os.environ.get() instead of writing it in the code
#before using config:
#csrf = CsrfProtect(app)
#after config
#csrf = CsrfProtect() #now we do this at the end, in if __name__...
#this is what the video said, but it gives a warning; the one on the line below works without warnings
#csrf = CSRFProtect() THIS WASN'T WORKING
csrf = CSRFProtect(app) #from https://flask-wtf.readthedocs.io/en/stable/csrf.html
mail = Mail()
def send_email(user_email, username): #to know where to send it and write the username in the email
msg = Message('Thank you for signing up!', #title of the email.
sender = app.config['MAIL_USERNAME'],
recipients = [user_email])
msg.html = render_template('email.html', username = username)
mail.send(msg) #here we actually send the message
@app.errorhandler(404)
def page_not_found(e): #anything works, not only an 'e'
return render_template('404.html'), 404 #flask doesn't send the error number, we have to do it.
@app.before_request #we use this for validation, e.g. whether the user has permission to access that url, or even to keep a visit counter for that url,
def before_request():
if 'username' not in session:
print (request.endpoint) #this gives you the endpoint name (usually the view function name)
print ('User needs to log in!')
#validate the url...validate if the user is authenticated, let's imagine we want to make 'comment' only accessible to authenticated users
if 'username' not in session and request.endpoint in ['comment']:
return redirect(url_for('login'))
elif 'username' in session and request.endpoint in ['login', 'create']: #why would an authenticated user go to login or create? Let's send them to index
return redirect(url_for('index')) #the function index, not the route.
g.test = 'test' #here we create the global variables. ?I guess we could pull all of one vendor's data here??
@app.route('/')
def index():
'''
#we are reading cookies here
#custome_cookie = request.cookies.get('custome_cookie') this would receive the custome_cookie we created and sent ('Eduardo')
custome_cookie = request.cookies.get('custome_cookies', 'Undefined') #this does: if the cookie 'custome_cookies' is not found, it returns 'Undefined'
print (custome_cookie)
'''
if 'username' in session: #session is our sessions dictionary
username = session['username']
title = "Index"
return render_template('index.html', title = title)
@app.route('/logout') #here we destroy the cookies
def logout():
if 'username' in session:
session.pop('username') #destroy cookie
return redirect(url_for('login')) # to redirect, using url_for we type the function name, not the path, so just 'login', no /asddad/adad/login
@app.route('/login', methods = ['GET', 'POST'])
def login():
login_form = forms.LoginForm(request.form)
if request.method == 'POST' and login_form.validate():
username = login_form.username.data
password = login_form.password.data
user = userstest.query.filter_by(username = username).first() #select * from users where username = username limit 1. It returns an object with the information of the user. If not found, it will return a None
if user is not None and user.verify_password(password):
success_message = 'Welcome {}'.format(username)
flash(success_message)
session['username'] = username
session['user_id'] = user.id
return redirect( url_for('index') )
else:
error_message = 'Invalid username or password'
flash(error_message)
#after adding session['username'] = username a few lines above, isn't this one left over?
#session['username'] = login_form.username.data #a session variable called username will be created each time whose value is the own username
return render_template('login.html', form = login_form)
@app.route('/cookie')
def cookie():
#we are creating cookies here
response = make_response( render_template('cookie.html'))
response.set_cookie('custome_cookie', 'Eduardo')
return response
#by default in flask, only method GET, we have to specify POST
@app.route('/comment', methods = ['GET', 'POST'])
def comment():
comment_form = forms.CommentForm(request.form)
if request.method == 'POST' and comment_form.validate(): #to validate forms inputs. We also had to add it in _macro.html, in the list {{field.errors}}
'''print(comment_form.username.data)
print(comment_form.email.data)
print(comment_form.comment.data)
else:
print ("Error in the form!!")'''
user_id = session['user_id'] #since we work with cookies, this is the way to get the user_id
comment = comments(user_id = user_id,
text = comment_form.comment.data)
print(comment)
db.session.add(comment)
db.session.commit()
success_message = "New comment created"
flash(success_message)
title = "Flask Course"
return render_template('comment.html', title = title, comment_form = comment_form)
@app.route('/create', methods = ['GET', 'POST'])
def create():
create_form = forms.CreateForm(request.form)
if request.method == 'POST' and create_form.validate():
user = userstest(create_form.username.data,
create_form.password.data,
create_form.email.data)
db.session.add(user) #this needs an object inherited from the model, like user
db.session.commit() #here we insert it in the database
#SQLAlchemy is clever enough to know how to open and close connections, so we don't have to worry about that if we write those two lines.
@copy_current_request_context #this is like the bridge to send the email in the background..
def send_message(email,username):
send_email(email, username)
sender = threading.Thread(name='mail_sender',
target = send_message,
args = (user.email, user.username)) #arguments of the function that sends the email.
sender.start()
success_message = 'User registered in the database'
flash(success_message)
return render_template('create.html', form = create_form)
@app.route('/reviews/', methods=['GET'])
@app.route('/reviews/<int:page>', methods = ['GET']) #we have to write it twice to make pages
def reviews(page = 1): # =1 is only the default value, so /reviews/ and /reviews/1 is the same
per_page = 1000
comment_list = comments.query.join(userstest).add_columns(
userstest.username, #the model, not the table
comments.text,
comments.created_date).paginate(page,per_page,True) #(page, rows per page, error_out: True=404 on an empty page, False=empty list)
return render_template('reviews.html', comments = comment_list, date_format = date_format) #we send the function as a parameter
@app.after_request
def after_request(response):
return response #always return response
@app.route('/rfi/', methods=['GET'])
@app.route('/rfi/<vendor_name>/', methods=['GET'], defaults={ 'module_name' : None})
@app.route('/rfi/<vendor_name>/<module_name>/', methods=['GET', 'POST'])
def rfi(vendor_name, module_name):
vendorid = vendors.query.filter_by(vendor_name = vendor_name).add_columns(vendors.vendorid).first()[1]
current_quarter = current_quarteryear.query.add_columns(current_quarteryear.quarter).first()[1]
current_year = current_quarteryear.query.add_columns(current_quarteryear.year).first()[1]
print('current quarter:', current_quarter, current_year)
if module_name is None:
title = vendor_name
'''
This commented block pulled every module a vendor had participated in by looking at rfielements, but it's better to do it through the vendors_rfi table
smce_ids_list_raw = rfielements.query.filter_by(vendor_id = vendorid).add_columns(rfielements.smce_id).all()
smce_ids_list = set()
for item in smce_ids_list_raw:
smce_ids_list.add(item[1])
module_ids_list_raw = suitemodcatelem.query.filter(suitemodcatelem.smceid.in_(smce_ids_list)).add_columns(suitemodcatelem.module_id).all()
module_ids_list = set()
for item in module_ids_list_raw:
module_ids_list.add(item[1])
module_ids_list = sorted(module_ids_list)
print(module_ids_list)
module_names_raw = modules.query.filter(modules.moduleid.in_(module_ids_list)).add_columns(modules.module_name).all()
module_names = []
for item in module_names_raw:
module_names.append(item[1])
print(module_names)
'''
suitemod_ids_raw = vendors_rfi.query.filter_by(vendor_id = vendorid).filter_by(quarter = current_quarter).filter_by(year = current_year).add_columns(vendors_rfi.suitemod_id, vendors_rfi.status, vendors_rfi.current_round).all()
print('suitemod_ids_raw',suitemod_ids_raw)
module_status_round = []
for item in suitemod_ids_raw:
module_name = suitemodules.query.filter_by(suitemodid = item[1]).order_by(desc(suitemodules.update_date)).add_columns(suitemodules.suitemod_name).first()[1]
if item[2] == 'N':
status = 'New'
elif item[2] == 'R':
status = 'Refreshing'
elif item[2] == 'E':
status = 'Existing'
elif item[2] == 'Z':
status = 'Not participating anymore'
else:
sys.exit('Status is not one of N, R, E or Z')
current_round = item[3]
module_status_round.append([module_name, status, current_round])
print('module_status_round', module_status_round)
return render_template('rfi:vendor.html', title = title, vendor_name = vendor_name, module_status_round = module_status_round, urllib_parse_quote = urllib.parse.quote)
else:
title = vendor_name + ' - ' + module_name
suitemodid = suitemodules.query.filter_by(suitemod_name = module_name).add_columns(suitemodules.suitemodid).first()[1]
status = vendors_rfi.query.filter_by(vendor_id = vendorid).filter_by(suitemod_id = suitemodid).filter_by(quarter = current_quarter).filter_by(year = current_year).add_columns(vendors_rfi.status).first()[1]
current_round = vendors_rfi.query.filter_by(vendor_id = vendorid).filter_by(suitemod_id = suitemodid).filter_by(quarter = current_quarter).filter_by(year = current_year).add_columns(vendors_rfi.current_round).first()[1]
form = forms.ElementForm(request.form)
'''if status == 'N' or status == 'R': Not necessary anymore since now current_round is 0, 1 or 2.
current_round = vendors_rfi.query.filter_by(vendor_id = vendorid).filter_by(suitemod_id = suitemodid).add_columns(vendors_rfi.current_round).first()[1]'''
print('status', status, '\ncurrent_round', current_round)
print('suitemodid', suitemodid)
suitemodcat_list_raw = suitemodcat.query.filter_by(suitemod_id = suitemodid).add_columns(suitemodcat.suitemodcatid, suitemodcat.category_name_id).all()
suitemodcat_list = []
category_name_ids_list = [] #same length as suitemodcat_list
for item in suitemodcat_list_raw:
suitemodcat_list.append(item[1])
category_name_ids_list.append(item[2])
print('suitemodcat_list', suitemodcat_list)
ids_list_raw = []
for item in suitemodcat_list:
ids_list_raw.append(suitemodcatelem.query.filter_by(suitemodcat_id = item).add_columns(suitemodcatelem.suitemodcat_id, suitemodcatelem.smceid, suitemodcatelem.element_name_id, suitemodcatelem.variant_id).all())
ids_list = [] #[suitemodcatid, [scmeid, elementnameid, variantid]]
smce_ids_list = []
for item in suitemodcat_list: ids_list.append([item, []])
for item1 in ids_list_raw:
for item2 in item1:
index = suitemodcat_list.index(item2[1])
ids_list[index][1].append([item2[2],item2[3],item2[4]])
smce_ids_list.append(item2[2])
print('smce_ids_list', smce_ids_list)
last_provider_submission = rfielements_providers.query.filter_by(vendor_id = vendorid).filter(rfielements_providers.smce_id.in_(smce_ids_list)).order_by(desc(rfielements_providers.update_date)).add_columns(rfielements_providers.update_date, rfielements_providers.user_id).first()[1:] #[date, user_id]
print('last_provider_submission', last_provider_submission)
print('ids_list[0]', ids_list[0])
#Averages table
categories_names = []
for item in category_name_ids_list: categories_names.append(category_names.query.filter_by(category_nameid = item).add_columns(category_names.category_name).first()[1])
categories_names.append('Average Score')
categories_ss_averages = []
total_suitemod_ss_sum = 0
total_suitemod_ss_len = 0
for item1 in ids_list:
category_ss_sum = 0
category_ss_len = 0
for item2 in item1[1]:
try:
current_ss = last_self_score(vendorid, item2[0], current_quarter, current_year)
category_ss_sum += current_ss
category_ss_len += 1
total_suitemod_ss_sum += current_ss
total_suitemod_ss_len += 1
except TypeError: pass
try: category_ss_average = category_ss_sum/category_ss_len
except ZeroDivisionError: category_ss_average = '-'
categories_ss_averages.append(category_ss_average)
total_suitemod_ss_average = total_suitemod_ss_sum/total_suitemod_ss_len
categories_ss_averages.append(total_suitemod_ss_average)
print(categories_ss_averages)
categories_last_quarter_averages = []
categories_sm_averages = []
categories_benchmark_averages = []
for item in range(0,11):
categories_last_quarter_averages.append('lq' + str(item))
categories_sm_averages.append('sm' + str(item))
categories_benchmark_averages.append('b' + str(item))
print(categories_last_quarter_averages, categories_sm_averages, categories_benchmark_averages)
summary_table = []
for item1, item2, item3, item4, item5 in zip(categories_names, categories_ss_averages, categories_last_quarter_averages, categories_sm_averages, categories_benchmark_averages): summary_table.append([item1, item2, item3, item4, item5])
print(summary_table)
'''rfielements_info:
0 vendor_id
1 smce_id
2 quarter
3 year
4 round
5 self_score
6 self_description
7 attachment_id
8 sm_score
9 analyst_notes
'''
#rfielements_info_raw = rfielements.query.filter_by(vendor_id = vendorid).filter(rfielements.smce_id.in_(smce_ids_list)).add_columns(rfielements.vendor_id, rfielements.smce_id, rfielements.quarter, rfielements.year, rfielements.round, rfielements.self_score, rfielements.self_description, rfielements.attachment_id, rfielements.sm_score, rfielements.analyst_notes).all()
rfielements_providers_info_raw = rfielements_providers.query.filter_by(vendor_id = vendorid).filter(rfielements_providers.smce_id.in_(smce_ids_list)).add_columns(rfielements_providers.vendor_id, rfielements_providers.smce_id, rfielements_providers.quarter, rfielements_providers.year, rfielements_providers.round, rfielements_providers.self_score, rfielements_providers.self_description, rfielements_providers.attachment_id).all()
rfielements_analysts_info_raw = rfielements_analysts.query.filter_by(vendor_id = vendorid).filter(rfielements_analysts.smce_id.in_(smce_ids_list)).add_columns(rfielements_analysts.vendor_id, rfielements_analysts.smce_id, rfielements_analysts.quarter, rfielements_analysts.year, rfielements_analysts.round, rfielements_analysts.sm_score, rfielements_analysts.analyst_notes).all()
#df = pd.DataFrame(rfielements_info_raw)
df_providers =
|
pd.DataFrame(rfielements_providers_info_raw)
|
pandas.DataFrame
|
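A small hedged sketch of the pattern the snippet above ends on: the SQLAlchemy queries return lists of row tuples, and pd.DataFrame turns them into a frame. The column names below are placeholders, not the real rfielements_providers schema.

import pandas as pd

rows = [            # shape of query(...).add_columns(...).all(): a list of tuples
    ("<row obj>", 1, 101, "Q1", 2020, 1, 3.5),
    ("<row obj>", 1, 102, "Q1", 2020, 1, 4.0),
]
df_providers = pd.DataFrame(
    rows,
    columns=["row", "vendor_id", "smce_id", "quarter", "year", "round", "self_score"],
)
print(df_providers.head())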
import pandas as pd
import numpy as np
import unittest
from dstools.regressor.BestGuessRegressor import BestGuessRegressor
class TestBestGuessRegressor(unittest.TestCase):
def compare_DataFrame(self, df_transformed, df_transformed_correct):
"""
helper function to compare the values of the transformed DataFrame with the values of a correctly transformed DataFrame
"""
#same number of columns
self.assertEqual(len(df_transformed.columns), len(df_transformed_correct.columns))
#check for every column in correct DataFrame, that all items are equal
for column in df_transformed_correct.columns:
#compare every element
for x, y in zip(df_transformed[column], df_transformed_correct[column]):
#if both values are np.NaN, the assertion fails, although they are equal
if np.isnan(x) and np.isnan(y):
pass
else:
self.assertEqual(x, y)
def test_numeric_independent_numeric_dependent_best_guess(self):
"""
a constant numeric value should be returned
"""
df=pd.DataFrame({'x':[1,2,3],'y':[1,2,3]})
y_predicted_correct=pd.DataFrame({'y_predicted':[2,2,2]})
bgc = BestGuessRegressor()
bgc.fit(df['x'],df['y'])
y_predicted=pd.DataFrame(data=bgc.predict(df['x']),columns=['y_predicted'])
self.compare_DataFrame(y_predicted,y_predicted_correct)
def test_string_independent_numeric_dependent_best_guess(self):
"""
a constant numeric value should be returned
"""
df=
|
pd.DataFrame({'x':['1','2','3'],'y':[1,2,3]})
|
pandas.DataFrame
|
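For intuition only: the expected output [2, 2, 2] for y = [1, 2, 3] in the tests above suggests the "best guess" is a constant prediction equal to the mean of the training targets. A minimal sketch of that behaviour, without the real BestGuessRegressor:

import pandas as pd

y = pd.Series([1, 2, 3])
best_guess = y.mean()                                    # 2.0
y_predicted = pd.Series([best_guess] * len(y), name="y_predicted")
print(y_predicted.tolist())                              # [2.0, 2.0, 2.0]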
"""Unit tests for the NullTransformer."""
from unittest.mock import patch
import numpy as np
import pandas as pd
from rdt.transformers import NullTransformer
class TestNullTransformer:
def test___init__default(self):
"""Test the initialization without passing any default arguments.
When no arguments are passed, the attributes should be populated
with the right values.
Input:
- nothing
Expected Side Effects:
- The `_missing_value_replacement` attribute should be `None`.
- The `_model_missing_values` attribute should be `False`.
"""
# Run
transformer = NullTransformer()
# Assert
assert transformer._missing_value_replacement is None
assert not transformer._model_missing_values
def test___init__not_default(self):
"""Test the initialization passing values different than defaults.
When arguments are passed, the attributes should be populated
with the right values.
Input:
- Values different than the defaults.
Expected Side Effects:
- The attributes should be populated with the given values.
"""
# Run
transformer = NullTransformer('a_missing_value_replacement', False)
# Assert
assert transformer._missing_value_replacement == 'a_missing_value_replacement'
assert not transformer._model_missing_values
def test_models_missing_values(self):
"""Test the models_missing_values method.
If the `model_missing_values` attribute evaluates to True, the
`models_missing_values` method should return the same value.
Setup:
- Create an instance and set _model_missing_values to True
Expected Output:
- True
"""
# Setup
transformer = NullTransformer('something', model_missing_values=True)
transformer._model_missing_values = True
# Run
models_missing_values = transformer.models_missing_values()
# Assert
assert models_missing_values
def test__get_missing_value_replacement_scalar(self):
"""Test _get_missing_value_replacement when a scalar value is passed.
If a missing_value_replacement different from None, 'mean' or 'mode' is
passed to __init__, that value is returned.
Setup:
- NullTransformer passing a specific missing_value_replacement
that is not None, mean or mode.
Input:
- A Series with some values.
- A np.array with boolean values.
Expected Output:
- The value passed to __init__
"""
# Setup
transformer = NullTransformer('a_missing_value_replacement')
# Run
data = pd.Series([1, np.nan, 3], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement == 'a_missing_value_replacement'
def test__get_missing_value_replacement_all_nulls(self):
"""Test _get_missing_value_replacement when all the values are null.
If the missing_value_replacement is not a scalar value and all the data
values are null, the output be the mean, which is `np.nan`.
Setup:
- NullTransformer passing 'mean' as the missing_value_replacement.
Input:
- A Series filled with nan values.
- A np.array of all True values.
Expected Output:
- np.nan
"""
# Setup
transformer = NullTransformer('mean')
# Run
data = pd.Series([np.nan, np.nan, np.nan], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement is np.nan
def test__get_missing_value_replacement_none_numerical(self):
"""Test _get_missing_value_replacement when missing_value_replacement is None.
If the missing_value_replacement is None and the data is numerical,
the output fill value should be the mean of the input data.
Setup:
- NullTransformer passing with default arguments.
Input:
- A Series filled with integer values such that the mean
is not contained in the series and there is at least one null.
- A np.array of booleans indicating which values are null.
Expected Output:
- The mean of the inputted Series.
"""
# Setup
transformer = NullTransformer('mean')
# Run
data = pd.Series([1, 2, np.nan], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement == 1.5
def test__get_missing_value_replacement_none_not_numerical(self):
"""Test _get_missing_value_replacement when missing_value_replacement is None.
If the missing_value_replacement is None and the data is not numerical,
the output fill value should be the mode of the input data.
Setup:
- NullTransformer with default arguments.
Input:
- A Series filled with string values with variable frequency and
at least one null value.
- A np.array of booleans indicating which values are null.
Expected Output:
- The most frequent value in the input series.
"""
# Setup
transformer = NullTransformer('mode')
# Run
data = pd.Series(['a', 'b', 'b', np.nan], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement == 'b'
def test__get_missing_value_replacement_mean(self):
"""Test _get_missing_value_replacement when missing_value_replacement is mean.
If the missing_value_replacement is mean the output fill value should be the
mean of the input data.
Setup:
- NullTransformer passing 'mean' as the missing_value_replacement.
Input:
- A Series filled with integer values such that the mean
is not contained in the series and there is at least one null.
- A np.array of booleans indicating which values are null.
Expected Output:
- The mean of the input Series.
"""
# Setup
transformer = NullTransformer('mean')
# Run
data = pd.Series([1, 2, np.nan], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement == 1.5
def test__get_missing_value_replacement_mode(self):
"""Test _get_missing_value_replacement when missing_value_replacement is 'mode'.
If the missing_value_replacement is 'mode' the output fill value should be the
mode of the input data.
Setup:
- NullTransformer passing 'mode' as the missing_value_replacement.
Input:
- A Series filled with integer values such that the mean
is not contained in the series and there is at least one null.
- A np.array of booleans indicating which values are null.
Expected Output:
- The most frequent value in the input series.
"""
# Setup
transformer = NullTransformer('mode')
# Run
data = pd.Series([1, 2, 2, np.nan], name='abc')
missing_value_replacement = transformer._get_missing_value_replacement(data)
# Assert
assert missing_value_replacement == 2
def test_fit_model_missing_values_none_and_nulls(self):
"""Test fit when null column is none and there are nulls.
If there are nulls in the data and model_missing_values was given as None,
then the _model_missing_values attribute should be set to True.
Also validate that the null attribute and the _missing_value_replacement attributes
are set accordingly.
Setup:
- A NullTransformer with default arguments.
Input:
- pd.Series of integers that contains nulls.
Expected Side Effects:
- the model_missing_values attribute should be set to True.
- the nulls attribute should be set to True.
- the missing_value_replacement should be set to the mean of the given integers.
"""
# Setup
transformer = NullTransformer(missing_value_replacement='mean', model_missing_values=True)
# Run
data = pd.Series([1, 2, np.nan])
transformer.fit(data)
# Assert
assert transformer.nulls
assert transformer._model_missing_values
assert transformer._missing_value_replacement == 1.5
def test_fit_model_missing_values_none_and_no_nulls(self):
"""Test fit when null column is none and there are NO nulls.
If there are no nulls in the data and model_missing_values was given as ``False``,
then the _model_missing_values attribute should be set to ``False``.
Also validate that the null attribute and the ``_missing_value_replacement`` attributes
are set accordingly.
Setup:
- A NullTransformer with default arguments.
Input:
- pd.Series of strings that contains no nulls.
Expected Side Effects:
- the model_missing_values attribute should be set to False.
- the nulls attribute should be set to False.
- the missing_value_replacement should be left as ``None``, the default.
"""
# Setup
transformer = NullTransformer()
# Run
data = pd.Series(['a', 'b', 'b'])
transformer.fit(data)
# Assert
assert not transformer.nulls
assert not transformer._model_missing_values
assert transformer._missing_value_replacement is None
def test_fit_model_missing_values_not_none(self):
"""Test fit when null column is set to True/False.
If model_missing_values is set to True or False, the _model_missing_values should
get that value regardless of whether there are nulls or not.
Notice that this test covers 4 scenarios at once.
Setup:
- 4 NullTransformer instances, 2 of them passing False for the model_missing_values
and 2 of them passing True.
Input:
- 2 pd.Series, one containing nulls and the other not containing nulls.
Expected Side Effects:
- the _model_missing_values attribute should be set to True or False as indicated
in the Transformer creation.
- the nulls attribute should be True or False depending on whether
the input data contains nulls or not.
"""
# Setup
model_missing_values_false_nulls = NullTransformer(
missing_value_replacement='mode',
model_missing_values=False
)
model_missing_values_false_no_nulls = NullTransformer(
missing_value_replacement='mode',
model_missing_values=False
)
model_missing_values_true_nulls = NullTransformer(
missing_value_replacement='mean',
model_missing_values=True
)
model_missing_values_true_no_nulls = NullTransformer(
missing_value_replacement='mean',
model_missing_values=True
)
nulls_str = pd.Series(['a', 'b', 'b', np.nan])
no_nulls_str = pd.Series(['a', 'b', 'b', 'c'])
nulls_int = pd.Series([1, 2, 3, np.nan])
no_nulls_int = pd.Series([1, 2, 3, 4])
# Run
model_missing_values_false_nulls.fit(nulls_str)
model_missing_values_false_no_nulls.fit(no_nulls_str)
model_missing_values_true_nulls.fit(nulls_int)
model_missing_values_true_no_nulls.fit(no_nulls_int)
# Assert
assert not model_missing_values_false_nulls._model_missing_values
assert model_missing_values_false_nulls.nulls
assert model_missing_values_false_nulls._missing_value_replacement == 'b'
assert not model_missing_values_false_no_nulls._model_missing_values
assert not model_missing_values_false_no_nulls.nulls
assert model_missing_values_false_no_nulls._missing_value_replacement == 'b'
assert model_missing_values_true_nulls._model_missing_values
assert model_missing_values_true_nulls.nulls
assert model_missing_values_true_nulls._missing_value_replacement == 2
assert not model_missing_values_true_no_nulls._model_missing_values
assert not model_missing_values_true_no_nulls.nulls
assert model_missing_values_true_no_nulls._missing_value_replacement == 2.5
def test_transform__model_missing_values_true(self):
"""Test transform when _model_missing_values.
When _model_missing_values, the nulls should be replaced
by the _missing_value_replacement and another column flagging the nulls
should be created.
Setup:
- NullTransformer instance with _model_missing_values set to True,
_missing_value_replacement set to a scalar value.
Input:
- A pd.Series of strings with nulls.
Expected Output:
- Exactly the same as the input, replacing the nulls with the
scalar value.
Expected Side Effects:
- The input data has the null values replaced.
"""
# Setup
transformer = NullTransformer()
transformer.nulls = False
transformer._model_missing_values = True
transformer._missing_value_replacement = 'c'
input_data = pd.Series(['a', 'b', np.nan])
# Run
output = transformer.transform(input_data)
# Assert
expected_output = np.array([
['a', 0.0],
['b', 0.0],
['c', 1.0],
], dtype=object)
np.testing.assert_equal(expected_output, output)
def test_transform__model_missing_values_false(self):
"""Test transform when _model_missing_values is False.
When the _model_missing_values is false, the nulls should be replaced
by the _missing_value_replacement.
Setup:
- NullTransformer instance with _model_missing_values set to False,
_missing_value_replacement set to a scalar value.
Input:
- A pd.Series of integers with nulls.
Expected Output:
- Same data as the input, replacing the nulls with the
scalar value.
Expected Side Effects:
- The input data has not been modified.
"""
# Setup
transformer = NullTransformer()
transformer._model_missing_values = False
transformer._missing_value_replacement = 3
input_data = pd.Series([1, 2, np.nan])
# Run
output = transformer.transform(input_data)
# Assert
expected_output = np.array([1, 2, 3])
np.testing.assert_equal(expected_output, output)
modified_input_data = pd.Series([1, 2, np.nan])
pd.testing.assert_series_equal(modified_input_data, input_data)
def test_reverse_transform__model_missing_values_true_nulls_true(self):
"""Test reverse_transform when _model_missing_values and nulls are True.
When _model_missing_values and nulls attributes are both True, the second column
in the input data should be used to decide which values to replace
with nan, by selecting the rows where the null column value is > 0.5.
Setup:
- NullTransformer instance with _model_missing_values and nulls
attributes set to True.
Input:
- 2d numpy array with variate float values.
Expected Output:
- pd.Series containing the first column from the input data
with the values indicated by the first column replaced by nans.
Expected Side Effects:
- the input data should have been modified.
"""
# Setup
transformer = NullTransformer()
transformer._model_missing_values = True
transformer.nulls = True
input_data = np.array([
[0.0, 0.0],
[0.2, 0.2],
[0.4, 0.4],
[0.6, 0.6],
[0.8, 0.8],
])
# Run
output = transformer.reverse_transform(input_data)
# Assert
expected_output = pd.Series([0.0, 0.2, 0.4, np.nan, np.nan])
pd.testing.assert_series_equal(expected_output, output)
def test_reverse_transform__model_missing_values_true_nulls_false(self):
"""Test reverse_transform when _model_missing_values and nulls is False.
When _model_missing_values but nulls are False, the second column of the
input data must be dropped and the first one returned as a Series without
having been modified.
Setup:
- NullTransformer instance with _model_missing_values set to True and nulls
attribute set to False
Input:
- 2d numpy array with variate float values.
Expected Output:
- pd.Series containing the first column from the input data unmodified.
"""
# Setup
transformer = NullTransformer()
transformer._model_missing_values = True
transformer.nulls = False
input_data = np.array([
[0.0, 0.0],
[0.2, 0.2],
[0.4, 0.4],
[0.6, 0.6],
[0.8, 0.8],
])
# Run
output = transformer.reverse_transform(input_data)
# Assert
expected_output = pd.Series([0.0, 0.2, 0.4, 0.6, 0.8])
pd.testing.assert_series_equal(expected_output, output)
@patch('rdt.transformers.null.np.random')
def test_reverse_transform__model_missing_values_false_nulls_true(self, random_mock):
"""Test reverse_transform when _model_missing_values is False and nulls.
When _model_missing_values is False and the nulls attribute, a ``_null_percentage``
of values should randomly be replaced with ``np.nan``.
Setup:
- NullTransformer instance with _model_missing_values set to False and nulls
attribute set to True.
- A mock for ``np.random``.
Input:
- 1d numpy array with variate float values.
Expected Output:
- pd.Series containing the same data as input, with the random values
replaced with ``np.nan``.
"""
# Setup
transformer = NullTransformer()
transformer._model_missing_values = False
transformer.nulls = True
transformer._null_percentage = 0.5
input_data = np.array([0.0, 0.2, 0.4, 0.6])
random_mock.random.return_value = np.array([1, 1, 0, 1])
# Run
output = transformer.reverse_transform(input_data)
# Assert
expected_output = pd.Series([0.0, 0.2, np.nan, 0.6])
|
pd.testing.assert_series_equal(expected_output, output)
|
pandas.testing.assert_series_equal
|
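A library-free sketch of the reverse_transform behaviour exercised in test_reverse_transform__model_missing_values_true_nulls_true above: the second column flags modelled nulls, and rows whose flag exceeds 0.5 are set back to NaN.

import numpy as np
import pandas as pd

transformed = np.array([
    [0.0, 0.0],
    [0.2, 0.2],
    [0.4, 0.4],
    [0.6, 0.6],
    [0.8, 0.8],
])
values = pd.Series(transformed[:, 0])
values[transformed[:, 1] > 0.5] = np.nan
print(values.tolist())        # [0.0, 0.2, 0.4, nan, nan]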
# Genuary 2022, Jan 5
# Task: Destroy a Square
# Idea: Draw a Square then shuffle all the xy coords
# import python libraries, install with 'pip install ....'
import numpy as np
import pandas as pd
import random as rn
# create a square path using x & y coords
x = list(range(0,24)) + [24]*25 + list(range(24,-1,-1)) + [0]*25
y = [0]*25 + list(range(0,24)) + [24]*25 + list(range(24,-1,-1))
path = list(range(0,len(x)))
# create new positions for the xy coords
new_x = [rn.uniform(0, 24) for p in range(0, len(x))]
new_y = [rn.uniform(0, 24) for p in range(0, len(x))]
# create data frame for square
d = {'x': x, 'y': y,'path': path,'stage':1}
df = pd.DataFrame(data=d)
# create data frame for square after being shuffled
d_new = {'x': new_x, 'y': new_y,'path': path,'stage':2}
df_new =
|
pd.DataFrame(data=d_new)
|
pandas.DataFrame
|
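A possible continuation (assumed, not part of the original sketch): stacking the two stages into a single frame makes it easy to plot the intact square and its shuffled version side by side.

import pandas as pd

stage1 = pd.DataFrame({"x": [0, 1], "y": [0, 0], "path": [0, 1], "stage": 1})
stage2 = pd.DataFrame({"x": [0.7, 0.2], "y": [0.9, 0.1], "path": [0, 1], "stage": 2})
both = pd.concat([stage1, stage2], ignore_index=True)
print(both)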
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype
from functools import reduce
import warnings
import weakref
from itertools import combinations
from scipy.stats import chi2_contingency
import numpy as np
from collections import Counter
@pd.api.extensions.register_dataframe_accessor("cats")
class CatsAccessor:
"""A class of useful categorical stuff to add to pandas
"""
def __init__(self, pandas_obj):
self._finalizer = weakref.finalize(self, self._cleanup)
self._validate(pandas_obj)
self._obj = pandas_obj
self._categorical_columns = None
def _cleanup(self):
del self._obj
def remove(self):
self._finalizer()
@staticmethod
def _validate(obj):
# verify this is a DataFrame
if not isinstance(obj, pd.DataFrame):
raise AttributeError("Must be a pandas DataFrame")
def _get_categorical_columns(self):
result = [col for col in self._obj.columns if infer_dtype(self._obj[col]) in ['object', 'string', 'category', 'categorical']]
self._categorical_columns = result
return result
def _cramers_corrected_stat(self, confusion_matrix, correction: bool) -> float:
"""Calculate the Cramer's V corrected stat for two variables.
Function from pandas-profiling.github.io
Args:
confusion_matrix: Crosstab between two variables.
correction: Should the correction be applied?
Returns:
The Cramer's V corrected stat for the two variables.
"""
chi2 = chi2_contingency(confusion_matrix, correction=correction)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
# Deal with NaNs later on
with np.errstate(divide="ignore", invalid="ignore"):
phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))
rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)
kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)
corr = np.sqrt(phi2corr / min((kcorr - 1.0), (rcorr - 1.0)))
return corr
def corr(self, correction = True):
self._get_categorical_columns()
results = pd.DataFrame()
combos = combinations(self._categorical_columns , 2)
for combo in list(combos):
print(combo)
cat_matrix = pd.crosstab(self._obj[combo[0]], self._obj[combo[1]])
corr_coef = self._cramers_corrected_stat(cat_matrix, correction)
results_series = pd.Series([combo[0], combo[1], corr_coef])
results =
|
pd.concat([results, results_series], axis=1)
|
pandas.concat
|
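A hedged usage sketch for the accessor defined above: because of the register_dataframe_accessor("cats") decorator, its methods become available as df.cats.* on any DataFrame. The corr() method is cut off mid-definition in the snippet, so this only shows how the accessor would be invoked, with made-up data.

import pandas as pd

df = pd.DataFrame({
    "color": ["red", "red", "blue", "blue", "green", "green"],
    "size": ["S", "S", "L", "L", "M", "M"],
    "price": [1.0, 1.2, 3.4, 3.1, 2.2, 2.0],
})
pairwise_cramers_v = df.cats.corr()    # Cramer's V for every categorical column pair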
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.decomposition import pca
sns.set_style("whitegrid")
class Weights:
def __init__(self, tumor_path: str, sample_dir: str):
self.tumor_path = tumor_path
self.tumor = self._load_tumor()
self.genes = self.tumor.columns[5:]
self.sample_dir = sample_dir
self.df = self._weight_df()
self.perc = self._perc_df()
self.num_samples = len(self.df["sample"].unique())
def _weight_df(self) -> pd.DataFrame:
"""
Creates DataFrame of sample weights from a directory of samples
Columns: tissue, normal_tissue, weight, sample_id
Returns:
DataFrame of Weights
"""
# DataFrame: cols=tissue, normal_tissue, weight
weights = []
tissues = self.tumor.tissue
for sample in os.listdir(self.sample_dir):
sample_tissue = tissues.loc[sample]
w = pd.read_csv(
os.path.join(self.sample_dir, sample, "weights.tsv"), sep="\t"
)
w.columns = ["normal_tissue", "Median", "std"]
w["tissue"] = sample_tissue
w["sample"] = sample
weights.append(w.drop("std", axis=1))
return pd.concat(weights).reset_index(drop=True)
def _perc_df(self) -> pd.DataFrame:
"""
Converts DataFrame of weights into a DataFrame of percentages
Returns:
Weight percentage DataFrame
"""
c = self.df.groupby(["tissue", "normal_tissue"])["Median"].sum().rename("count")
perc = c / c.groupby(level=0).sum() * 100
return perc.reset_index()
def _load_tumor(self):
print(f"Reading in {self.tumor_path}")
if self.tumor_path.endswith(".csv"):
df = pd.read_csv(self.tumor_path, index_col=0)
elif self.tumor_path.endswith(".tsv"):
df = pd.read_csv(self.tumor_path, sep="\t", index_col=0)
else:
try:
df = pd.read_hdf(self.tumor_path)
except Exception as e:
print(e)
raise RuntimeError(f"Failed to open DataFrame: {self.tumor_path}")
return df
def plot_match_scatter(self, out_dir: str = None):
"""
Scatterplot of samples by tissue and their matched tissue model weight
Args:
out_dir: Optional output directory
Returns:
Plot axes object
"""
df = self.df
# Subset for matched-tissue samples
df = df[df.normal_tissue == df.tissue].sort_values("tissue")
f, ax = plt.subplots(figsize=(8, 4))
sns.swarmplot(data=df, x="tissue", y="Median")
plt.xticks(rotation=45)
plt.xlabel("Tissue")
plt.ylabel("GTEx Matched Tissue Weight")
plt.title("TCGA Tumor Samples and Model Weight for GTEx Matched Tissue")
if out_dir:
plt.savefig(os.path.join(out_dir, "matched_weight_scatter.svg"))
return ax
def plot_perc_heatmap(self, out_dir: str = None):
"""
Heatmap of weight percentages by tumor tissue and GTEx tissue
Args:
out_dir: Optional output directory
Returns:
Plot axes object
"""
f, ax = plt.subplots(figsize=(8, 7))
perc_heat = self.perc.pivot(
index="normal_tissue", columns="tissue", values="count"
)
sns.heatmap(
perc_heat.apply(lambda x: round(x, 2)),
cmap="Blues",
annot=True,
linewidths=0.5,
)
plt.xlabel("Tumor Tissue")
plt.ylabel("GTEx Tissue")
plt.title(f"Average Weight (%) of Tumor to GTEx Tissue (n={self.num_samples})")
if out_dir:
plt.savefig(os.path.join(out_dir, "weight_perc_heatmap.svg"))
return ax
def plot_pca_nearby_tissues(self, background_path: str, tissues, tumor_tissue):
df = pd.read_hdf(background_path)
tumor = self.tumor
tumor = tumor[tumor.tissue == tumor_tissue]
tumor["tissue"] = f"{tumor_tissue}-Tumor"
sub = df[df.tissue.isin(tissues)]
pca_df =
|
pd.concat([tumor, sub])
|
pandas.concat
|
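A self-contained sketch of the groupby arithmetic inside _perc_df above: weights are summed per (tissue, normal_tissue) pair and then normalized within each tissue to percentages. The numbers are invented.

import pandas as pd

df = pd.DataFrame({
    "tissue": ["Lung", "Lung", "Liver", "Liver"],
    "normal_tissue": ["Lung", "Thyroid", "Liver", "Lung"],
    "Median": [0.8, 0.2, 0.6, 0.4],
})
c = df.groupby(["tissue", "normal_tissue"])["Median"].sum().rename("count")
perc = c / c.groupby(level=0).sum() * 100     # percentage within each tumor tissue
print(perc.reset_index())                     # Lung -> 80/20, Liver -> 60/40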
import numpy as np
import pandas as pd
from datetime import datetime
import json
from datetime import timedelta
# R: annual risk-free rate
# T: number of periods per year (T=12 for monthly data, T=52 for weekly data)
# Annualized Sharpe ratio (R is the one-year risk-free rate; T is the number of periods per year: T=12 for monthly, T=52 for weekly)
from Time.datatime import get_firstday_year
def get_sharpe_ratio(yield_list, R, T):
'''
:param yield_list:
:param R:
:param T:
:return:
'''
yield_list = yield_list.dropna()
if len(yield_list) > 1:
return ((np.average(yield_list)+1)**T-1-R)/(np.std(yield_list) * np.sqrt(T))
else:
return np.nan
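# Hedged usage sketch, not part of the original module: six made-up monthly returns,
# a 3% annual risk-free rate, and T=12 because the periods are monthly.
def _example_sharpe_usage():
    monthly_returns = pd.Series([0.01, 0.02, -0.005, 0.015, 0.0, 0.01])
    return get_sharpe_ratio(monthly_returns, R=0.03, T=12)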
# Standard deviation
def get_year_std(yield_list):
yield_list = yield_list.dropna()
if len(yield_list) > 1:
return yield_list.std()
else:
return np.nan
# Annualized downside standard deviation (R_T is the per-period risk-free rate)
def get_DownStd(yield_list, R, T):
yield_list = yield_list.dropna()
R_T = (R + 1) ** (1 / T) - 1
newlist = []
for i in yield_list:
if i<R_T:
newlist.append((i-R_T)**2)
else:
continue
return np.sqrt(np.average(newlist) * T)
# Maximum drawdown; s is a Series indexed by date
def get_max_retracement(s):
s_retracement = 1 - s / s.expanding(min_periods=1).max()
edate = s_retracement.idxmax()
max_retracement = s_retracement[edate]
bdate = s[:edate].idxmax()
rdate = s[s > s[bdate]][edate:].index.min()
rdays = (rdate - edate).days
return [max_retracement, bdate, edate, rdate, rdays]
# Maximum drawdown; s_source is a Series indexed by date
def get_max_retracement(s_source, current_T, section='total'):
if section == 'total':
s = s_source[:current_T]
elif section == 'year':
if get_firstday_year(current_T) < s_source.index[0]:
return [np.nan, np.nan, np.nan, np.nan, np.nan]
else:
s = s_source[get_firstday_year(current_T):current_T]
elif section == 'm3':
if (current_T -
|
pd.DateOffset(months=3)
|
pandas.DateOffset
|
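A worked, self-contained example of the expanding-max drawdown logic used in get_max_retracement above (the NAV series is invented).

import pandas as pd

nav = pd.Series(
    [1.00, 1.10, 1.05, 0.90, 0.95, 1.12],
    index=pd.date_range("2021-01-01", periods=6, freq="D"),
)
drawdown = 1 - nav / nav.expanding(min_periods=1).max()
edate = drawdown.idxmax()        # date of the deepest drawdown
max_dd = drawdown[edate]         # 1 - 0.90/1.10, about 0.1818
bdate = nav[:edate].idxmax()     # the peak that preceded it
print(max_dd, bdate.date(), edate.date())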
from config.data import Config
import mpvr.datamodule._fig_preset as fig_preset
import importlib
import pandas as pd
import numpy as np
class Manager:
def __init__(self, *args, **kwargs):
self.conf = kwargs.get('conf')
self._section = kwargs.get('section')
self._tags = self.conf.get_tags()
self._load_state = {'scenario': False,
'incidence': False,
'timestamp': False}
self._preprocess = importlib.import_module( 'mpvr.datamodule.' + str.lower(self._section))
@classmethod
def from_config(cls, section, conf = Config):
return cls(conf=conf, section=section)
@staticmethod
def section_list():
return Config.section_list()
def get_scenarios(self):
return Config.get_section_scenarios(self._section)
def set_scenario(self, scenario):
self._setting = self.conf.get_config(self._section, scenario)
self._scenario = scenario
self._load_state_init()
self._load_state['scenario'] = True
def _load_state_init(self):
for key in self._load_state.keys():
self._load_state[key] = False
def _check_and_load(self, states):
load_funcs = {'incidence': self._load_incidence_data,
'timestamp': self._load_timestamp_data}
for state in states:
if not self._load_state[state]:
load_funcs[state]()
###############################################################################################
# get functions #
###############################################################################################
def get_motion_data_gen(self,
path=None,
timediffs=None,
indices=None,
axes = None,
sensored_axes_tag=None,
target_sampling_rate=None):
if timediffs is None:
self._check_and_load(['timestamp'])
indices = self._indices
timediffs = self._timediffs
if path is None:
path = self._setting.motion_data.path
if axes is None:
axes = self._setting.motion_data.axes
if sensored_axes_tag is None:
sensored_axes_tag = self._setting.motion_data.sensored_axes_tag
if target_sampling_rate is None:
target_sampling_rate = self._setting.target_sampling_rate
return self._preprocess.load_motion_gen(path,
axes,
sensored_axes_tag,
target_sampling_rate,
indices,
timediffs)
def get_classified_motion_data_gen(self, gen=None, is_classified=None, seperator=None):
if gen is None:
gen = self.get_motion_data_gen()
if is_classified is None:
is_classified = self._setting.motion_data.is_classified
if seperator is None:
seperator = self._setting.motion_data.motion_seperator
if is_classified:
for motion_vector in gen:
yield self._preprocess.classification_motion(motion_vector)
else:
motion_data = [x for x in gen]
bins = self._preprocess.make_bins(motion_data, seperator)
for motion_vector in motion_data:
yield self._preprocess.classification_motion(motion_vector, bins)
def get_visual_data_gen(self,
path=None,
indices=None,
timediffs=None,
extension=None,
target_sampling_rate=None):
if timediffs is None:
self._check_and_load(['timestamp'])
indices = self._indices
timediffs = self._timediffs
if path is None:
path = self._setting.video_data.path
if extension is None:
extension = self._setting.video_data.extension
if target_sampling_rate is None:
target_sampling_rate = self._setting.target_sampling_rate
return self._preprocess \
.load_visual_gen(path,
target_sampling_rate,
extension,
indices,
timediffs)
def get_classified_visual_data_gen(self, gen=None):
if gen is None:
gen = self.get_visual_data_gen()
for polars in gen:
yield self._preprocess.classification_visual(polars)
def make_tuple_gen(self, gen1, gen2):
for d in zip(*(gen1, gen2)):
yield d
def get_incidence_data(self):
self._check_and_load(['incidence'])
return self._incidence
def get_timestamp_data(self):
self._check_and_load(['timestamp'])
return self._times, self._timediffs, self._indices
def get_processed_data(self, tag, path=None, remark_dir=''):
tags = self._tags
if path is None:
path = self._setting.save_result_path + tags[tag]['dir'] + tags['tbl']['dir'] \
+ remark_dir + self._scenario + tags['tbl']['ext']
df =
|
pd.read_csv(path, encoding="ISO-8859-1")
|
pandas.read_csv
|
import matplotlib.pyplot as plt
import algorithm2_v3
import pandas as pd
# n is the number of points to evaluate
def get_xy1(data_raw, n, adjustment_f, best_f, label, b):
x = []
y_ours = []
y_cs = []
y_gs = []
algorithm2_v3.initial_everything1(data_raw, adjustment_f, label, b)
print('--------------------------')
for i in range(1,n+1):
y1, y2, y3 = algorithm2_v3.mae1(data_raw, i / n, best_f)
y_ours.append(y1)
y_cs.append(y2)
y_gs.append(y3)
x.append(i / n)
return x, y_ours, y_cs, y_gs
# n is the number of points to evaluate
def get_xy2(data_raw, n, adjustment_f, best_f, label, b):
x = []
y_ours = []
y_cs = []
y_gs = []
algorithm2_v3.initial_everything2(data_raw, adjustment_f,label, b)
for i in range(1,n+1):
y1, y2, y3 = algorithm2_v3.mae2(data_raw, i / n, best_f)
y_ours.append(y1)
y_cs.append(y2)
y_gs.append(y3)
x.append(i / n)
return x, y_ours, y_cs, y_gs
def painting(x, y_ours, y_cs, y_gs, n, func_name, data_name, b):
plt.rcParams['font.sans-serif'] = 'Times New Roman'
plt.title('%s-%s-b=%d' % (func_name,data_name,b))
plt.rcParams['lines.marker'] = '.'
plt.grid() # draw grid lines
plt.xlabel('Epsilon')
plt.ylabel('MAE')
plt.xticks(x)
plt.plot(x, y_ours)
plt.plot(x, y_cs)
plt.plot(x, y_gs)
plt.legend(['Private CSₚ', 'Private CS', 'Private GS'])
plt.savefig('%s-%s-b=%d' % (func_name,data_name,b))
plt.show()
def painting_on_1picture(dicts):
# global plot settings
plt.rcParams['font.sans-serif'] = 'Times New Roman'
plt.rcParams['lines.marker'] = '.'
plt.rcParams['axes.grid'] = True
p = plt.figure(figsize=(8, 6), dpi=80) ## set the canvas size
k=0
for i in dicts:
k += 1
p.add_subplot(2, 2, k) ## create a 2x2 grid of subplots and start drawing the k-th one
plt.title('')
plt.xlabel('Epsilon')
plt.ylabel('MAE')
x = i['x']
plt.xticks(x)
plt.plot(x, i['y_ours'])
plt.plot(x, i['y_cs'])
plt.plot(x, i['y_gs'])
plt.legend(['Private CSₚ', 'Private CS', 'Private GS'])
plt.savefig('----')
plt.show()
# save the computed data to a local file
def save_data(x, y_ours, y_cs, y_gs, func_name, data_name, b):
df = pd.DataFrame()
df['y_ours'] = y_ours
df['y_cs'] = y_cs
df['y_gs'] = y_gs
df['x'] = x
df.to_excel('%s-%s-b=%d.xlsx' % (func_name,data_name,b))
# read the data back from a local file
def read_data(path):
df =
|
pd.read_excel(path)
|
pandas.read_excel
|
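A hedged sketch of the read-back path implied by read_data above: loading an Excel file written earlier by save_data and redrawing the three MAE curves. The file name follows save_data's '%s-%s-b=%d.xlsx' pattern but is hypothetical, and the file is assumed to exist.

import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_excel("ours-somedata-b=1.xlsx")   # written earlier by save_data(...)
plt.plot(df["x"], df["y_ours"])
plt.plot(df["x"], df["y_cs"])
plt.plot(df["x"], df["y_gs"])
plt.legend(['Private CSₚ', 'Private CS', 'Private GS'])
plt.xlabel('Epsilon')
plt.ylabel('MAE')
plt.show()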
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                        '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                        '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                        '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
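# Illustrative aside (not part of the test class above): a standalone sketch of
# the subtraction semantics checked by test_sub_dti_dti -- DatetimeIndex minus
# DatetimeIndex yields a TimedeltaIndex, with NaT propagating through missing
# slots. Uses only pandas; the names below are local to this sketch.
import pandas as pd

left = pd.DatetimeIndex(['2012-01-01', pd.NaT, '2012-01-03'])
right = pd.DatetimeIndex(['2012-01-02', '2012-01-03', pd.NaT])
diff = right - left
# diff is TimedeltaIndex(['1 days', NaT, NaT])
assert diff.equals(pd.TimedeltaIndex(['1 days', pd.NaT, pd.NaT]))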
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
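# Illustrative aside (separate from the truncated test_repeat above): a minimal,
# standalone sketch of the behaviour it exercises -- TimedeltaIndex.repeat (and
# np.repeat) tile each element and drop the original freq. Assumes only pandas
# and numpy; the names below are local to this sketch.
import numpy as np
import pandas as pd

tdi = pd.timedelta_range('1 days', periods=2, freq='D')
expected = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for repeated in [tdi.repeat(2), np.repeat(tdi, 2)]:
    # the repeated index matches element-wise and no longer carries a freq
    assert repeated.equals(expected)
    assert repeated.freq is None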
import os
import subprocess
import findspark
from pyspark import SparkContext, SparkConf
import numpy as np
import pandas as pd
from numpy import linalg as LA
from statistics import mean
import ipyleaflet
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl
)
from matplotlib.colors import rgb2hex
from matplotlib.pyplot import xticks, yticks, figure
import pylab as plt
from ipyleaflet import Heatmap, WidgetControl, FullScreenControl
from ipywidgets import IntSlider, jslink
class leaflet_old:
"""
Plots circles on a map (one per station) whose size is proportional to the number of feature measurements, and whose color is equal to their aggregated value (min, max, avg)
:param featureStr: the feature you'd like to visualize given as a string, eg. 'SNWD'
:param aggregateType: 'avg', 'min', 'max'
"""
def __init__(self, sqlctxt, featureStr):
self.feat = featureStr
self.pdfMaster = None
self.sqlctxt = sqlctxt
self.maxLong = None
self.minLong = None
self.maxLat = None
self.minLat = None
self.minAggVal = None
self.maxAggVal = None
self.cmap = plt.get_cmap('jet_r')
self.m = None
def add(self, dataframe):
"""
Adds (or concatenates) a dataframe to the instance's master dataframe. The dataframe to be added must have at least
the following columns: station, latitude, longitude, featureStr, value
"""
self.sqlctxt.registerDataFrameAsTable(dataframe, "temp")
query = f"""
SELECT Station, latitude, longitude, COUNT(Measurement), MEAN(Values)
FROM temp
WHERE Measurement=='{self.feat}'
GROUP BY Station, latitude, longitude
"""
tempdf = self.sqlctxt.sql(query).toPandas()
if (self.pdfMaster is None):
# set as new pdfMaster
self.pdfMaster = tempdf
else:
# append to the existing pdfMaster
self.pdfMaster = pd.concat([self.pdfMaster, tempdf])
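# Illustrative aside (separate from the class above): the pure-pandas pattern
# that leaflet_old.add relies on -- aggregate one batch per station, then grow
# a running master frame with pd.concat. A minimal sketch with made-up column
# names and data; it skips the Spark/SQLContext piece the real class needs, and
# the named aggregation assumes pandas >= 0.25.
import pandas as pd

def append_station_summary(master, readings):
    """Aggregate one batch of readings per station and concat onto the master frame."""
    summary = (readings.groupby(['Station', 'latitude', 'longitude'], as_index=False)
                       .agg(n_obs=('Values', 'count'), mean_value=('Values', 'mean')))
    return summary if master is None else pd.concat([master, summary], ignore_index=True)

batch = pd.DataFrame({'Station': ['A', 'A', 'B'],
                      'latitude': [32.7, 32.7, 33.0],
                      'longitude': [-117.2, -117.2, -117.1],
                      'Values': [10.0, 14.0, 7.0]})
master = append_station_summary(None, batch)
master = append_station_summary(master, batch)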
import os
import pandas as pd
from numpy import testing as npt
import pandas.util.testing as pdt
import ixmp
import pytest
from ixmp.default_path_constants import CONFIG_PATH
from testing_utils import (
test_mp,
test_mp_props,
test_mp_use_default_dbprops_file,
test_mp_use_db_config_path,
)
test_args = ('Douglas Adams', 'Hitchhiker')
can_args = ('canning problem', 'standard')
# string columns for timeseries checks
iamc_idx_cols = ['model', 'scenario', 'region', 'variable', 'unit']
cols_str = ['region', 'variable', 'unit', 'year']
def local_config_exists():
return os.path.exists(CONFIG_PATH)
@pytest.mark.skipif(local_config_exists(),
reason='will not overwrite local config files')
def test_default_dbprops_file(test_mp_use_default_dbprops_file):
test_mp = test_mp_use_default_dbprops_file
scenario = test_mp.scenario_list(model='Douglas Adams')['scenario']
assert scenario[0] == 'Hitchhiker'
@pytest.mark.skipif(local_config_exists(),
reason='will not overwrite local config files')
def test_db_config_path(test_mp_use_db_config_path):
test_mp = test_mp_use_db_config_path
scenario = test_mp.scenario_list(model='Douglas Adams')['scenario']
assert scenario[0] == 'Hitchhiker'
def test_platform_init_raises():
pytest.raises(ValueError, ixmp.Platform, dbtype='foo')
def test_scen_list(test_mp):
scenario = test_mp.scenario_list(model='Douglas Adams')['scenario']
assert scenario[0] == 'Hitchhiker'
def test_new_scen(test_mp):
scen = ixmp.Scenario(test_mp, *can_args, version='new')
assert scen.version == 0
def test_default_version(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
assert scen.version == 2
def test_init_par_35(test_mp):
scen = ixmp.Scenario(test_mp, *can_args, version='new')
scen.init_set('ii')
scen.init_par('new_par', idx_sets='ii')
def test_get_scalar(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
obs = scen.scalar('f')
exp = {'unit': 'USD/km', 'value': 90}
assert obs == exp
def test_init_scalar(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
scen2 = scen.clone(keep_solution=False)
scen2.check_out()
scen2.init_scalar('g', 90.0, 'USD/km')
scen2.commit("adding a scalar 'g'")
# make sure that changes to a scenario are copied over during clone
def test_add_clone(test_mp):
scen = ixmp.Scenario(test_mp, *can_args, version=1)
scen.check_out()
scen.init_set('h')
scen.add_set('h', 'test')
scen.commit("adding an index set 'h', with element 'test'")
scen2 = scen.clone(keep_solution=False)
obs = scen2.set('h')
npt.assert_array_equal(obs, ['test'])
# make sure that (only) the correct scenario is touched after cloning
def test_clone_edit(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
scen2 = scen.clone(keep_solution=False)
scen2.check_out()
scen2.change_scalar('f', 95.0, 'USD/km')
scen2.commit('change transport cost')
obs = scen.scalar('f')
exp = {'unit': 'USD/km', 'value': 90}
assert obs == exp
obs = scen2.scalar('f')
exp = {'unit': 'USD/km', 'value': 95}
assert obs == exp
def test_idx_name(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
df = scen.idx_names('d')
npt.assert_array_equal(df, ['i', 'j'])
def test_var_marginal(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
df = scen.var('x', filters={'i': ['seattle']})
npt.assert_array_almost_equal(df['mrg'], [0, 0, 0.036])
def test_var_level(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
df = scen.var('x', filters={'i': ['seattle']})
npt.assert_array_almost_equal(df['lvl'], [50, 300, 0])
def test_var_general_str(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
df = scen.var('x', filters={'i': ['seattle']})
npt.assert_array_equal(
df['j'], ['new-york', 'chicago', 'topeka'])
def test_unit_list(test_mp):
units = test_mp.units()
assert ('cases' in units) is True
def test_add_unit(test_mp):
test_mp.add_unit('test', 'just testing')
def test_par_filters_unit(test_mp):
scen = ixmp.Scenario(test_mp, *can_args)
df = scen.par('d', filters={'i': ['seattle']})
obs = df.loc[0, 'unit']
exp = 'km'
assert obs == exp
def test_new_timeseries(test_mp):
scen = ixmp.TimeSeries(test_mp, *test_args, version='new', annotation='fo')
df = {'year': [2010, 2020], 'value': [23.5, 23.6]}
df = pd.DataFrame.from_dict(df)
df['region'] = 'World'
df['variable'] = 'Testing'
df['unit'] = '???'
scen.add_timeseries(df)
scen.commit('importing a testing timeseries')
def test_new_timeseries_error(test_mp):
scen = ixmp.TimeSeries(test_mp, *test_args, version='new', annotation='fo')
df = {'year': [2010, 2020], 'value': [23.5, 23.6]}
df = pd.DataFrame.from_dict(df)
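# Illustrative aside (separate from the ixmp tests above): the plain-pandas
# shape of the timeseries frame those tests build -- a dict of columns turned
# into a DataFrame via pd.DataFrame.from_dict, with constant identifier columns
# added afterwards. Column names mirror the tests; no ixmp Platform is involved.
import pandas as pd

ts = pd.DataFrame.from_dict({'year': [2010, 2020], 'value': [23.5, 23.6]})
ts['region'] = 'World'
ts['variable'] = 'Testing'
ts['unit'] = '???'
print(ts)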
from __future__ import print_function
import pytest
import sys
pytestmark = pytest.mark.skipif(sys.platform == 'win32',
reason='Requires Mac or Linux')
boto = pytest.importorskip('boto')
import os
import itertools
import json
from contextlib import contextmanager, closing
import datashape
from datashape import string, float64, int64
from datashape.util.testing import assert_dshape_equal
import pandas as pd
import pandas.util.testing as tm
from odo import into, resource, S3, discover, CSV, drop, append, odo
from odo.backends.aws import get_s3_connection
from odo.utils import tmpfile
from odo.compatibility import urlopen
from boto.exception import S3ResponseError, NoAuthHandlerFound
tips_uri = 's3://nyqpug/tips.csv'
df = pd.DataFrame({
'a': list('abc'),
'b': [1, 2, 3],
'c': [1.0, 2.0, 3.0]
})[['a', 'b', 'c']]
js = pd.io.json.loads(pd.io.json.dumps(df, orient='records'))
is_authorized = False
tried = False
with closing(urlopen('http://httpbin.org/ip')) as url:
public_ip = json.loads(url.read().decode())['origin']
cidrip = public_ip + '/32'
@pytest.yield_fixture
def tmpcsv():
with tmpfile('.csv') as fn:
with open(fn, mode='w') as f:
df.to_csv(f, index=False)
yield fn
@contextmanager
def s3_bucket(extension):
with conn():
b = 's3://%s/%s%s' % (test_bucket_name, next(_tmps), extension)
try:
yield b
finally:
drop(resource(b))
@contextmanager
def conn():
# requires that you have a config file or envars defined for credentials
# this code makes me hate exceptions
try:
conn = get_s3_connection()
except S3ResponseError:
pytest.skip('unable to connect to s3')
else:
try:
grants = conn.get_bucket(test_bucket_name).get_acl().acl.grants
except S3ResponseError:
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
if not any(g.permission == 'FULL_CONTROL' or
g.permission == 'READ' for g in grants):
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
yield conn
test_bucket_name = 'into-redshift-csvs'
_tmps = ('tmp%d' % i for i in itertools.count())
@pytest.fixture
def s3_encryption_bucket():
test_bucket = os.getenv('ODO_S3_ENCRYPTION_BUCKET')
if not test_bucket:
pytest.skip('No bucket defined that requires server-side encryption')
return test_bucket
def test_s3_encrypted_upload(s3_encryption_bucket):
s3_connection = boto.connect_s3()
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
df.to_csv(fn, index=False)
s3_uri = 's3://{bucket}/{fn}'.format(bucket=s3_encryption_bucket, fn=os.path.basename(fn))
odo(fn, s3_uri, s3=s3_connection, encrypt_key=True)
result = odo(s3_uri, pd.DataFrame, s3=s3_connection)
tm.assert_frame_equal(df, result)
def test_s3_encrypted_multipart_upload(s3_encryption_bucket):
s3_connection = boto.connect_s3()
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
df.to_csv(fn, index=False)
s3_uri = 's3://{bucket}/{fn}'.format(bucket=s3_encryption_bucket, fn=os.path.basename(fn))
odo(fn, s3_uri, s3=s3_connection, encrypt_key=True, multipart=True)
result = odo(s3_uri, pd.DataFrame, s3=s3_connection)
tm.assert_frame_equal(df, result)
def test_s3_resource():
csv = resource(tips_uri)
assert isinstance(csv, S3(CSV))
def test_s3_discover():
csv = resource(tips_uri)
assert isinstance(discover(csv), datashape.DataShape)
def test_s3_to_local_csv():
with tmpfile('.csv') as fn:
csv = into(fn, tips_uri)
path = os.path.abspath(csv.path)
assert os.path.exists(path)
def test_csv_to_s3_append():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
s3 = resource(b)
df.to_csv(fn, index=False)
append(s3, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_csv_to_s3_into():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
df.to_csv(fn, index=False)
s3 = into(b, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_frame_to_s3_to_frame():
with s3_bucket('.csv') as b:
s3_csv = into(b, df)
result = into(pd.DataFrame, s3_csv)
tm.assert_frame_equal(result, df)
def test_textfile_to_s3():
text = 'A cow jumped over the moon'
with tmpfile('.txt') as fn:
with s3_bucket('.txt') as b:
with open(fn, mode='w') as f:
f.write(os.linesep.join(text.split()))
result = into(b, resource(fn))
assert discover(result) == datashape.dshape('var * string')
def test_jsonlines_to_s3():
with tmpfile('.json') as fn:
with open(fn, mode='w') as f:
for row in js:
f.write(pd.io.json.dumps(row))
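# Illustrative aside (separate from the odo tests above): the test writes one
# JSON document per line via pd.io.json.dumps; the documented pandas route to
# the same "JSON lines" layout is DataFrame.to_json(orient='records',
# lines=True), sketched here with a throwaway frame and a temporary file.
import tempfile
import pandas as pd

frame = pd.DataFrame({'a': list('abc'), 'b': [1, 2, 3], 'c': [1.0, 2.0, 3.0]})
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    f.write(frame.to_json(orient='records', lines=True))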
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import warnings
from unittest import TestCase
import pandas as pd
from tsfresh.utilities import dataframe_functions
import numpy as np
import six
class NormalizeTestCase(TestCase):
def test_with_dictionaries_one_row(self):
test_df = pd.DataFrame([{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# A kind is not allowed with dicts
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, "a kind", None)
# The value must be present
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, "something other")
# Nothing should have changed compared to the input data
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
six.assertCountEqual(self, list(test_dict.keys()), list(result_dict.keys()))
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
# The algo should choose the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows(self):
test_df = pd.DataFrame([{"value": 2, "sort": 2, "id": "id_1"},
{"value": 1, "sort": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# If there is more than one column, the algorithm cannot choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
# Sorting should work
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
# Assert sorted and without sort column
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
self.assertEqual(result_dict["a"].iloc[1].to_dict(), {"value": 2, "id": "id_1"})
# Assert the algo has found the correct column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows_sorted(self):
test_df = pd.DataFrame([{"value": 2, "id": "id_1"},
{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# Pass the id
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 2, "id": "id_1"})
# The algo should have found the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_df(self):
# give everything
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", "kind", "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("a", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["id", "value"])
self.assertEqual(list(result_dict["a"]["value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
# give no kind
test_df = pd.DataFrame([{"id": 0, "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("value", result_dict)
six.assertCountEqual(self, list(result_dict["value"].columns), ["id", "value"])
self.assertEqual(list(result_dict["value"]["value"]), [3])
self.assertEqual(list(result_dict["value"]["id"]), [0])
# Let the function find the values
test_df = pd.DataFrame([{"id": 0, "a": 3, "b": 5, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, None)
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "_value")
self.assertIn("a", result_dict)
self.assertIn("b", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["a"]["_value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
six.assertCountEqual(self, list(result_dict["b"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["b"]["_value"]), [5])
self.assertEqual(list(result_dict["b"]["id"]), [0])
def test_with_wrong_input(self):
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(AttributeError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"strange_id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 2}, {"id": 1}])
test_dict = {"a": test_df, "b": test_df}
# If there is more than one column, the algorithm cannot choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_dict = {"a": pd.DataFrame([{"id": 2, "value_a": 3}, {"id": 1, "value_a": 4}]),
"b": pd.DataFrame([{"id": 2}, {"id": 1}])}
# If there is more than one column, the algorithm cannot choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, "value")
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
None, None, None, "value")
class RollingTestCase(TestCase):
def test_with_wrong_input(self):
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [np.NaN, np.NaN]})
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [1, 1]})
self.assertRaises(AttributeError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="strange_id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = {"a": pd.DataFrame([{"id": 0}])}
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind=None,
rolling_direction=0)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind=None,
rolling_direction=0)
def test_assert_single_row(self):
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
def test_positive_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
correct_indices = (["id=1, shift=3"] * 1 +
["id=1, shift=2"] * 2 +
["id=1, shift=1"] * 3 +
["id=2, shift=1"] * 1 +
["id=1, shift=0"] * 4 +
["id=2, shift=0"] * 2)
correct_values_a = [1, 1, 2, 1, 2, 3, 10, 1, 2, 3, 4, 10, 11]
correct_values_b = [5, 5, 6, 5, 6, 7, 12, 5, 6, 7, 8, 12, 13]
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1)
self.assertListEqual(list(df["id"]), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=None)
self.assertListEqual(list(df["id"]), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=1)
self.assertListEqual(list(df["id"]), correct_indices[3:])
self.assertListEqual(list(df["a"].values), correct_values_a[3:])
self.assertListEqual(list(df["b"].values), correct_values_b[3:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=2)
self.assertListEqual(list(df["id"]), correct_indices[1:])
self.assertListEqual(list(df["a"].values), correct_values_a[1:])
self.assertListEqual(list(df["b"].values), correct_values_b[1:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=4)
self.assertListEqual(list(df["id"]), correct_indices[:])
self.assertListEqual(list(df["a"].values), correct_values_a[:])
self.assertListEqual(list(df["b"].values), correct_values_b[:])
def test_negative_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
correct_indices = (["id=1, shift=-3"] * 1 +
["id=1, shift=-2"] * 2 +
["id=1, shift=-1"] * 3 +
["id=2, shift=-1"] * 1 +
["id=1, shift=0"] * 4 +
["id=2, shift=0"] * 2)
correct_values_a = [4, 3, 4, 2, 3, 4, 11, 1, 2, 3, 4, 10, 11]
correct_values_b = [8, 7, 8, 6, 7, 8, 13, 5, 6, 7, 8, 12, 13]
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=-1)
self.assertListEqual(list(df["id"].values), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=-1,
maximum_number_of_timeshifts=None)
self.assertListEqual(list(df["id"].values), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=-1,
maximum_number_of_timeshifts=1)
self.assertListEqual(list(df["id"].values), correct_indices[3:])
self.assertListEqual(list(df["a"].values), correct_values_a[3:])
self.assertListEqual(list(df["b"].values), correct_values_b[3:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=-1,
maximum_number_of_timeshifts=2)
self.assertListEqual(list(df["id"].values), correct_indices[1:])
self.assertListEqual(list(df["a"].values), correct_values_a[1:])
self.assertListEqual(list(df["b"].values), correct_values_b[1:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=-1,
maximum_number_of_timeshifts=4)
self.assertListEqual(list(df["id"].values), correct_indices[:])
self.assertListEqual(list(df["a"].values), correct_values_a[:])
self.assertListEqual(list(df["b"].values), correct_values_b[:])
def test_stacked_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
df_stacked = pd.concat([df_full[["time", "id", "a"]].rename(columns={"a": "_value"}),
df_full[["time", "id", "b"]].rename(columns={"b": "_value"})], ignore_index=True)
df_stacked["kind"] = ["a"] * 6 + ["b"] * 6
df = dataframe_functions.roll_time_series(df_stacked, column_id="id", column_sort="time",
column_kind="kind", rolling_direction=-1)
correct_indices = (["id=1, shift=-3"] * 2 +
["id=1, shift=-2"] * 4 +
["id=1, shift=-1"] * 6 +
["id=2, shift=-1"] * 2 +
["id=1, shift=0"] * 8 +
["id=2, shift=0"] * 4)
self.assertListEqual(list(df["id"].values), correct_indices)
self.assertListEqual(list(df["kind"].values), ["a", "b"] * 13)
self.assertListEqual(list(df["_value"].values),
[4, 8, 3, 7, 4, 8, 2, 6, 3, 7, 4, 8, 11, 13, 1, 5, 2, 6, 3, 7, 4, 8, 10, 12, 11, 13])
def test_dict_rolling(self):
df_dict = {
"a": pd.DataFrame({"_value": [1, 2, 3, 4, 10, 11], "id": [1, 1, 1, 1, 2, 2]}),
"b": pd.DataFrame({"_value": [5, 6, 7, 8, 12, 13], "id": [1, 1, 1, 1, 2, 2]})
}
df = dataframe_functions.roll_time_series(df_dict, column_id="id", column_sort=None,
column_kind=None, rolling_direction=-1)
correct_indices = (["id=1, shift=-3"] * 1 +
["id=1, shift=-2"] * 2 +
["id=1, shift=-1"] * 3 +
["id=2, shift=-1"] * 1 +
["id=1, shift=0"] * 4 +
["id=2, shift=0"] * 2)
self.assertListEqual(list(df["a"]["id"].values), correct_indices)
self.assertListEqual(list(df["b"]["id"].values), correct_indices)
self.assertListEqual(list(df["a"]["_value"].values),
[4, 3, 4, 2, 3, 4, 11, 1, 2, 3, 4, 10, 11])
self.assertListEqual(list(df["b"]["_value"].values),
[8, 7, 8, 6, 7, 8, 13, 5, 6, 7, 8, 12, 13])
def test_warning_on_non_uniform_time_steps(self):
with warnings.catch_warnings(record=True) as w:
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": [1, 2, 4, 5]})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message),
"Your time stamps are not uniformly sampled, which makes rolling "
"nonsensical in some domains.")
class CheckForNanTestCase(TestCase):
def test_all_columns(self):
test_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1])
# should not raise an exception
dataframe_functions.check_for_nans_in_columns(test_df)
test_df = pd.DataFrame([[1, 2, 3], [4, np.NaN, 6]], index=[0, 1])
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df)
def test_not_all_columns(self):
test_df = pd.DataFrame([[1, 2, 3], [4, np.NaN, 6]], index=[0, 1], columns=["a", "b", "c"])
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df)
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["a", "b"])
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["b"])
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, "b")
self.assertRaises(ValueError, dataframe_functions.check_for_nans_in_columns, test_df, ["c", "b"])
dataframe_functions.check_for_nans_in_columns(test_df, columns=["a", "c"])
dataframe_functions.check_for_nans_in_columns(test_df, columns="a")
class ImputeTestCase(TestCase):
def test_impute_zero(self):
df = pd.DataFrame([{"value": np.NaN}])
dataframe_functions.impute_dataframe_zero(df)
self.assertEqual(list(df.value), [0])
df = pd.DataFrame([{"value": np.PINF}])
dataframe_functions.impute_dataframe_zero(df)
self.assertEqual(list(df.value), [0])
df = pd.DataFrame([{"value": np.NINF}])
dataframe_functions.impute_dataframe_zero(df)
self.assertEqual(list(df.value), [0])
df = pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
dataframe_functions.impute_dataframe_zero(df)
self.assertEqual(list(df.value), [0, 0, 0, 1])
df = pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
df = df.astype(np.float64)
df = dataframe_functions.impute_dataframe_zero(df)
self.assertEqual(list(df.value), [0, 0, 0, 1])
df =
|
pd.DataFrame([{"value": np.NINF}, {"value": np.NaN}, {"value": np.PINF}, {"value": 1}])
|
pandas.DataFrame
|
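# A minimal, self-contained sketch (not tsfresh's actual implementation) of the
# zero-imputation behaviour exercised by ImputeTestCase above: NaN, +inf and
# -inf are replaced by 0 while finite values are left untouched.
import numpy as np
import pandas as pd

def impute_zero_sketch(df):
    # map +/-inf to NaN first, then fill every NaN with 0
    return df.replace([np.inf, -np.inf], np.nan).fillna(0)

demo = pd.DataFrame({"value": [-np.inf, np.nan, np.inf, 1.0]})
assert list(impute_zero_sketch(demo)["value"]) == [0, 0, 0, 1]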
import openpyxl
import pandas as pd
from datetime import datetime, timedelta
import xlsxwriter
now = datetime.now()
date_time = now.strftime("%m_%d_%Y %I_%M_%p")
federal_tax_rate_path = "./federaltaxrates.csv"
state_tax_rate_path = "./statetaxrates.csv"
city_tax_rate_path = "./NYCtaxrates.csv"
# calculate social security tax
class EffectiveFederalTax:
def __init__(self, salary, marital_status):
self.salary = salary
self.marital_status = marital_status
def calculateSocialSecurityTaxDue(self):
if self.salary >= 147000:
return 9114
else:
return round(self.salary * 0.062, 2)
# calculate federal income tax + remainder of fica (medicare) for single filers
class EffectiveFederalTaxSingle(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "single")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table = pd.read_csv(federal_tax_rate_path)
federal_tax_bracket_tier = 0
single_income_column = federal_tax_rate_table.columns.get_loc("Single Income")
single_income_percentage_tax_column = federal_tax_rate_table.columns.get_loc("Single Tax Rate")
max_index = len(list(federal_tax_rate_table.index)) - 1
while federal_tax_bracket_tier <= max_index and \
int(federal_tax_rate_table.iloc[federal_tax_bracket_tier, single_income_column]) < \
(self.salary - self.deductions):
federal_tax_bracket_tier += 1
federal_tax_bracket_tier -= 1
federal_tax_due = 0
counter = 0
while counter <= federal_tax_bracket_tier - 1:
federal_tax_due += (federal_tax_rate_table.iloc[counter + 1, single_income_column]
- federal_tax_rate_table.iloc[counter, single_income_column])\
* (float((federal_tax_rate_table.iloc[counter, single_income_percentage_tax_column])
.strip("%")) / 100)
counter += 1
marginal_tax_due = (self.salary - self.deductions - federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_column]) \
* (float((federal_tax_rate_table.iloc[federal_tax_bracket_tier,
single_income_percentage_tax_column]).strip("%")) / 100)
federal_tax_due += marginal_tax_due
return round(federal_tax_due, 2)
def calculateMedicareTaxDue(self):
if self.salary <= 200000:
return round(self.salary * 0.0145, 2)
else:
return round(self.salary * 0.0145 + (self.salary - 200000) * 0.009, 2)
def calculateTotalFederalTaxesDue(self):
return self.calculateSocialSecurityTaxDue() + self.calculateFederalIncomeTaxDue() \
+ self.calculateMedicareTaxDue()
# calculate federal income tax + remainder of fica (medicare) for married filers
class EffectiveFederalTaxMarried(EffectiveFederalTax):
def __init__(self, salary, deductions):
super().__init__(salary, "Married")
self.deductions = deductions
def calculateFederalIncomeTaxDue(self):
federal_tax_rate_table =
|
pd.read_csv(federal_tax_rate_path)
|
pandas.read_csv
|
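# Hedged illustration of the marginal-bracket arithmetic used by
# calculateFederalIncomeTaxDue above; the bracket thresholds and rates below
# are made up for the example and are not real tax data.
brackets = [(0, 0.10), (10_000, 0.12), (40_000, 0.22)]  # (lower bound, rate)

def marginal_tax(taxable_income):
    due = 0.0
    for i, (lower, rate) in enumerate(brackets):
        upper = brackets[i + 1][0] if i + 1 < len(brackets) else float("inf")
        if taxable_income > lower:
            due += (min(taxable_income, upper) - lower) * rate
    return round(due, 2)

# 25,000 taxable: 10,000 at 10% + 15,000 at 12% = 2,800
assert marginal_tax(25_000) == 2800.0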
#!/usr/bin/env python
"""
Determine optimal CP_GROUP value for CPMD MPI calculations.
This is based on empirical investigation and is not guaranteed
to give optimal performance at all!
The attempt here is:
* to have a uniform distribution of plane waves on all CP groups.
* to minimize CP_GROUP with that constraint, where CP_GROUP is the
number of MPI tasks per CP_GROUP, i.e. CP_GROUP size.
* to make the number of MPI tasks a multiple of the number of CPUs on a node.
"""
__author__="<NAME>"
__email__="<EMAIL>"
import pandas as pd
if __name__=="__main__":
max_nnodes = int(input("Maximum number of nodes that can be used : "))
# if a node has 16 CPUs this should probably be 16
ntasks_per_node = int(input("Number of MPI tasks per node : "))
# This can be obtained by submitting a dummy CPMD calculation
# with your system and plane wave cutoff. Look for "REAL SPACE MESH".
mesh = int(input("Size of the REAL SPACE MESH in X direction : "))
foundit = False
solutions = {
'Number of nodes':[],
'CP_GROUP (size of)':[],
'Number of PW per group':[],
}
# We just go brute force and try all possibilities
for N in range(1,max_nnodes+1):
# CP_GROUP has to be lower than the number of MPI tasks
ntasks = N * ntasks_per_node
for ntasks_per_cpgrp in range(1,ntasks):
# check that ntasks_per_cpgrp is a divisor of ntasks
if ntasks%ntasks_per_cpgrp == 0:
n_cp_groups = ntasks//ntasks_per_cpgrp
else:
continue
# check that the number of CP_GROUPs is a divisor of the mesh size
if mesh%n_cp_groups == 0:
# we have a winner
foundit = True
npw_per_cpgrp = mesh//n_cp_groups
solutions['Number of nodes'].append( N )
solutions['CP_GROUP (size of)'].append( ntasks_per_cpgrp )
solutions['Number of PW per group'].append( npw_per_cpgrp )
# this one is the first value of CP_GROUP that works with
# the current number of nodes. It is thus the smallest value
# which is what we want, so we stop here for that number of nodes
break
else:
continue
print()
if foundit:
# convert solutions to a pandas data frame
# and sort it based on the number of plane waves
df =
|
pd.DataFrame(data=solutions)
|
pandas.DataFrame
|
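# Compact restatement, as a sketch, of the feasibility test used above: a node
# count / CP_GROUP-size pair is acceptable when the group size divides the total
# MPI task count and the resulting number of groups divides the real-space mesh.
def feasible(n_nodes, ntasks_per_node, group_size, mesh):
    ntasks = n_nodes * ntasks_per_node
    if ntasks % group_size != 0:
        return False
    return mesh % (ntasks // group_size) == 0

assert feasible(n_nodes=2, ntasks_per_node=16, group_size=8, mesh=120)  # 4 groups, 120 % 4 == 0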
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#!/usr/bin/env python3
import pandas as pd
import sys
import numpy as np
import os
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
sys.path.append("/Users/tkaiser2/bin")
sys.path.append("/home/tkaiser2/bin")
from tymer import *
bins=np.zeros(10)
upper=[1,2,4,8,12,18,24,30,36,5000]
lower=[0,1,2,4,8,12,18,24,30,36]
bins=np.zeros(len(upper))
tymer(["-i","start"])
flist=open('zips','r')
people=flist.readlines()
#people=['jjenkins']
#for infile in sys.stdin:
tmin=10./3600
w=0.0
tj=0
infile=people[0].strip()
outfile=infile+"_jobs"
overs=pd.read_pickle(infile+".zip")
overs['who']=infile
overs=overs[0:0]
print(overs)
for infile in people:
infile=infile.strip()
outfile=infile+"_jobs"
#if os.path.exists(outfile):
# os.remove(outfile)
try:
jobs=pd.read_pickle(infile+".zip")
jw=sum(jobs.wait)
w=w+jw
tj=tj+len(jobs)
#print(jobs.wall)
#jobs.to_hdf(outfile, 'jobs')
#jobs.to_pickle(infile+".zip",protocol=4)
tjobs=len(jobs.loc[(jobs.wall > tmin)])
#print("%15s %12d" %(infile,tjobs))
for x in range(0,len(upper)-1) :
#print(x,lower[x],upper[x])
#c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x])])
c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x]) & (jobs.wall > tmin)])
bins[x]=bins[x]+c
#print("%5.2f %4d %5d" %(c*100,lower[x],upper[x]))
for x in range(len(upper)-1,len(upper)) :
#c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x])])
c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x]) & (jobs.wall > tmin)])
#print("***",x,lower[x],upper[x],c)
if (c > 0):
bonk=jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x]) & (jobs.wall > tmin)].copy(deep=True)
#print(bonk)
bonk['who']=infile
overs=overs.append(bonk,ignore_index=True)
bins[x]=bins[x]+c
except:
print(infile+" failed")
# tymer(["-i",infile])
tymer(["-i",infile])
tjobs=sum(bins)
bins=bins/tjobs
print("Total Jobs= "+str(tjobs))
print("%CPU usage")
print(" % >cores <cores")
for x in range(0,len(upper)) :
#c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x])])
bins[x]=bins[x]*100.0
print("%5.2f %4d %5d" %(bins[x],lower[x],upper[x]))
#print(w,tj,(w/tj)/3600.0)
# In[ ]:
flist=open('people','r')
flist=flist.readlines()
over_two=pd.DataFrame(columns=['Nodes','Count','Wait_sum','Wait_max'])
#print(over_jobs)
#for infile in people[0:10]:
for infile in people:
#for infile in people:
infile=infile.strip()
outfile=infile+"_jobs"
asum=pd.read_hdf(outfile)
nodeset=asum['NNodes'].unique()
#print(asum)
count=np.zeros(len(nodeset))
sumit=np.zeros(len(nodeset))
maxit=np.zeros(len(nodeset))
mynodes=np.zeros(len(nodeset))
k=0
for n in nodeset:
c=asum.loc[asum.NNodes == n]
count[k]=len(c)
sumit[k]=sum(c.wait)
maxit[k]=max(c.wait)
mynodes[k]=n
k=k+1
#print(infile)
#print(nodeset)
for nzip in zip(nodeset,count,sumit,maxit):
n,c,s,m=list(nzip)
#print(n,c,s,m)
if (len(over_two.loc[(over_two.Nodes == n)]) > 0):
myrow=over_two.loc[(over_two.Nodes == n)]
ind=myrow.index[0]
over_two.at[ind,'Count']=float(over_two.loc[[ind],['Count']].values[0])+c
over_two.at[ind,'Wait_sum']=s+float(over_two.loc[[ind],['Wait_sum']].values[0])
pre_max=float(over_two.loc[[ind],['Wait_max']].values[0])
if m > pre_max:
over_two.at[ind,'Wait_max']=m
else:
df=pd.DataFrame([[n,c,s,m]],columns=over_two.columns)
over_two=over_two.append(df,ignore_index=True)
over_two['Wait_ave']=over_two.Wait_sum/over_two.Count
over_two.sort_values(by=['Nodes'],inplace=True)
print_full(over_two)
# In[ ]:
from pylab import figure,show,save
import matplotlib.pyplot as plt
plt.bar(over_two.Nodes,over_two.Wait_ave)
# In[ ]:
upper=np.array([1,2,4,8,16,32,64,128,256,762,3000])
lower=np.array([0,1,2,4,8,16,32,64,128,256,762])
bins=np.zeros(len(upper))
upper=upper*1e9
lower=lower*1e9
tymer(["-i","start"])
flist=open('zips','r')
people=flist.readlines()
#people=['jjenkins']
#for infile in sys.stdin:
tmin=1./3600
w=0.0
tj=0
gpus=0
for infile in people:
infile=infile.strip()
outfile=infile+"_jobs"
#if os.path.exists(outfile):
# os.remove(outfile)
try:
jobs=pd.read_pickle(infile+".zip")
#print(jobs)
#print(jobs.wall)
#jobs.to_hdf(outfile, 'jobs')
#jobs.to_pickle(infile+".zip",protocol=4)
tjobs=len(jobs.loc[(jobs.wall > tmin)])
gjobs=len(jobs.loc[(jobs.wall > tmin) & (jobs.ReqGRES)])
gpus=gpus+gjobs
#print("%15s %12d" %(infile,tjobs))
for x in range(0,len(upper)) :
#print(x,lower[x],upper[x])
#c=len(jobs.loc[(jobs.cores >= lower[x]) & (jobs.cores < upper[x])])
c=len(jobs.loc[(jobs.mem >= lower[x]) & (jobs.mem < upper[x]) & (jobs.wall > tmin)])
bins[x]=bins[x]+c
c=c/tjobs
#print("%5.2f %4d %5d" %(c*100,lower[x],upper[x]))
except:
print(infile+" failed")
# tymer(["-i",infile])
tymer(["-i",infile])
tjobs=sum(bins)
bins=bins/tjobs
print("Total Jobs= "+str(tjobs))
upper=np.array([1,2,4,8,16,32,64,128,256,762,3000])
lower=np.array([0,1,2,4,8,16,32,64,128,256,762])
print("%Jobs Using memory in range")
print(" % >GB <GB")
for x in range(0,len(upper)) :
bins[x]=bins[x]*100.0
print("%5.2f %4d %5d" %(bins[x],lower[x],upper[x]))
print("GPU USAGE: total jobs %d or %3.2f%s" % (gpus, 100*gpus/tjobs,"%"))
# In[ ]:
tymer(["-i","Done with memory summary"])
# In[ ]:
flist=open('people','r')
flist=flist.readlines()
over_two=pd.DataFrame(columns=['Nodes','Count','Wall_sum'])
#print(over_jobs)
#for infile in people[0:10]:
tmin=10.0/3600
#for infile in ['tkaiser2']:
for infile in people:
infile=infile.strip()
outfile=infile+"_jobs"
asum=pd.read_hdf(outfile)
nodeset=asum['NNodes'].unique()
#print(asum)
count=np.zeros(len(nodeset))
sumit=np.zeros(len(nodeset))
maxit=np.zeros(len(nodeset))
mynodes=np.zeros(len(nodeset))
k=0
for n in nodeset:
c=asum.loc[(asum.wall > tmin) & (asum.NNodes == n)]
count[k]=len(c)
sumit[k]=sum(c.wall)
mynodes[k]=n
k=k+1
#print(infile)
#print(nodeset)
for nzip in zip(nodeset,count,sumit):
n,c,s=list(nzip)
#print(n,c,s,m)
if (len(over_two.loc[(over_two.Nodes == n)]) > 0):
myrow=over_two.loc[(over_two.Nodes == n)]
ind=myrow.index[0]
over_two.at[ind,'Count']=float(over_two.loc[[ind],['Count']].values[0])+c
over_two.at[ind,'Wall_sum']=s+float(over_two.loc[[ind],['Wall_sum']].values[0])
pre_max=float(over_two.loc[[ind],['Wall_sum']].values[0])
else:
df=pd.DataFrame([[n,c,s]],columns=over_two.columns)
over_two=over_two.append(df,ignore_index=True)
over_two['Wall_ave']=3600.0*over_two.Wall_sum/over_two.Count
over_two.sort_values(by=['Nodes'],inplace=True)
over_two.drop(columns='Wall_sum',inplace=True)
print_full(over_two)
tymer(["-i","Done with wall time summary"])
# In[ ]:
startd=['2019-11-01','2019-12-01','2020-01-01','2020-02-01','2020-03-01','2020-04-01','2020-05-01']
endd= ['2019-11-30','2019-12-31','2020-01-31','2020-02-29','2020-03-31','2020-04-30','2020-05-31']
month= ['11','12','01','02','03','04','05']
for st,en,mo in zip(startd,endd,month):
print(st,en,mo)
# In[ ]:
overs=pd.read_pickle("tkaiser2"+".zip")
overs
# In[ ]:
overs.ReqGRES.isnull().sum()
# In[ ]:
do_gpu=pd.DataFrame(columns=['who','GPU','Total'])
flist=open('zips','r')
people=flist.readlines()
for w in people:
w=w.strip()
overs=pd.read_pickle(w+".zip")
nope=overs.ReqGRES.isnull().sum()
total=len(overs)
gpu=total-nope
df=
|
pd.DataFrame([[w,gpu,total]],columns=do_gpu.columns)
|
pandas.DataFrame
|
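# A short, self-contained sketch of the core-count binning done above, using
# numpy.histogram instead of the manual lower/upper loops (illustrative only;
# the sample core counts are invented).
import numpy as np

cores = np.array([1, 2, 2, 4, 9, 13, 24, 40])
edges = [0, 1, 2, 4, 8, 12, 18, 24, 30, 36, 5000]
counts, _ = np.histogram(cores, bins=edges)
percent = 100.0 * counts / counts.sum()
for pct, lo, hi in zip(percent, edges[:-1], edges[1:]):
    print("%5.2f %4d %5d" % (pct, lo, hi))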
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (
|
is_platform_32bit()
|
pandas.compat.is_platform_32bit
|
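# Minimal illustration of the to_json / read_json round trip that the tests
# above exercise for different `orient` values (a sketch only, using the
# public pandas API).
from io import StringIO
import pandas as pd

df = pd.DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "y"])
for orient in ("split", "records", "index", "columns", "values"):
    back = pd.read_json(StringIO(df.to_json(orient=orient)), orient=orient)
    print(orient, back.shape)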
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(),
|
Timedelta('3 days')
|
pandas.Timedelta
|
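# Quick sketch of the monotonicity and min/max behaviour checked above, using
# the public pandas API.
import pandas as pd

idx = pd.timedelta_range(start="1 days", periods=4, freq="D")
assert idx.is_monotonic_increasing
assert idx.min() == pd.Timedelta("1 days") and idx.max() == pd.Timedelta("4 days")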
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from beakerx_tabledisplay import TableDisplay, Table
import numpy as np
import pandas as pd
class TestTableDisplay(unittest.TestCase):
def test_NaT_support(self):
# given
df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'], columns=['one', 'two', 'three'])
df['timestamp'] =
|
pd.Timestamp('20120101')
|
pandas.Timestamp
|
#!/usr/bin/env python3
####################################################################################
#-------------------------------CX-ASAP: cif_reading-------------------------------#
#---Authors: <NAME>, <NAME>, <NAME> & <NAME>---#
#----------------------------Python Implementation by AJT--------------------------#
#-------------------------------Project Design by JRP------------------------------#
#-----------------------Valuable Coding Support by KMS & DJE-----------------------#
####################################################################################
#----------Instructions for Use----------#
#This module will read and analyse all of the CIF files in the current folder and below in the directory tree
#If you would like a single CIF file analysed, place it in an otherwise empty folder
#If you would like multiple CIF files analysed, place all of them into a single folder or below in the directory tree
#----------Required Modules----------#
import os
import pandas as pd
import re
import yaml
import logbook
import pathlib
from CifFile import ReadCif
#----------Class Definition----------#
class CIF_File:
def __init__(self, location = 'temp', home_path = os.getcwd()):
config = Config()
self.cfg = config.cfg
self.conf_path = config.conf_path
self.logger = config.logger
#if location == 'temp':
#os.chdir(self.cfg['System_Parameters']['current_results_path'])
os.chdir(location)
#Sets up empty lists/dictionaries to later populate with data
self.cif_files = []
self.results = {}
self.errors = {}
self.structures_in_cif = []
self.successful_positions = []
#Sets these to 0 to reset from previous runs
self.cfg['System_Parameters']['Structures_in_each_CIF'] = self.structures_in_cif
self.cfg['System_Parameters']['Successful_Positions'] = self.successful_positions
with open (self.conf_path, 'w') as f:
yaml.dump(self.cfg, f, default_flow_style=False, Dumper=Nice_YAML_Dumper, sort_keys=False)
#Pulls parameters from the configuration file as necessary, and uses it to set up an empty dataframe
self.search_items = self.cfg['User_Parameters_Full_Pipeline']['Analysis_Requirements']['cell_parameters']
for item in self.search_items:
self.results[item] = []
self.errors[item] = []
self.data = pd.DataFrame()
self.temp_df = pd.DataFrame()
self.data2 = pd.DataFrame()
self.data3 = pd.DataFrame()
self.data4 = pd.DataFrame()
def sorted_properly(self, data):
#This function sorts the files/folders properly (i.e. going 0, 1, 2, ..., 10 instead of 0, 1, 10, 2, etc.)
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key)]
return sorted(data, key=alphanum_key)
def get_data(self):
#This function searches through all of the folders in the current working directory for a cif file
for dirName, subdirList, fileList in os.walk(os.getcwd()):
sorted_fileList = self.sorted_properly(fileList)
for files in sorted_fileList:
#Once a unique CIF file is identified (checks for duplicates), the name is appended to a list and the data_harvest function is run on it
if files.endswith('.cif') and files.lower() not in self.cif_files:
self.cif_files.append(files.lower())
self.logger.info(files)
cif_file = os.path.join(os.getcwd(), files)
temp_data, structures_in_cif_tmp, successful_positions_tmp, temp_bonds, temp_angles, temp_torsion = self.data_harvest(cif_file)
self.data = self.data.append(temp_data)
self.data2 = self.data2.append(temp_bonds)
self.data3 = self.data3.append(temp_angles)
self.data4 = self.data4.append(temp_torsion)
self.structures_in_cif.append(structures_in_cif_tmp)
for item in successful_positions_tmp:
self.successful_positions.append(item)
self.cfg['System_Parameters']['Structures_in_each_CIF'] = self.structures_in_cif
self.cfg['System_Parameters']['Successful_Positions'] = self.successful_positions
with open (self.conf_path, 'w') as f:
yaml.dump(self.cfg, f, default_flow_style=False, Dumper=Nice_YAML_Dumper, sort_keys=False)
return self.data, self.data2
def parameter_tidy(self, raw, item):
if '(' in raw:
temp = raw.split('(')
temp2 = temp[1].strip(')')
self.results[item].append(float(temp[0]))
if '.' in raw:
temp3 = temp[0].split('.')
self.errors[item].append(int(temp2)*10**-(int(len(temp3[1]))))
else:
self.errors[item].append(int(temp2))
else:
try:
self.results[item].append(float(raw))
except ValueError:
self.results[item].append(raw)
self.errors[item].append(0)
def generate_cif_list(self, df):
longer_cif_list = []
longer_data_blocks = []
if len(df) != 0:
for item in self.cif_list:
i = 0
while i < (len(df) / len(self.cif_list)):
longer_cif_list.append(item)
i += 1
for item in self.data_blocks:
i = 0
while i < (len(df) / len(self.data_blocks)):
longer_data_blocks.append(item)
i += 1
return longer_cif_list, longer_data_blocks
def data_harvest(self, cif_file):
#Resets the dataframes/dictionaries
for item in self.search_items:
self.results[item] = []
self.errors[item] = []
self.temp_df = pd.DataFrame()
self.bond_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
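# Stand-alone sketch of the "natural" sort used by CIF_File.sorted_properly
# above, so that e.g. run_10.cif sorts after run_2.cif rather than after
# run_1.cif (the file names here are hypothetical).
import re

def natural_key(key):
    return [int(c) if c.isdigit() else c.lower() for c in re.split(r"([0-9]+)", key)]

names = ["run_10.cif", "run_2.cif", "run_1.cif"]
assert sorted(names, key=natural_key) == ["run_1.cif", "run_2.cif", "run_10.cif"]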
import pandas as pd
import warnings
import numpy as np
from matplotlib import pyplot as plt
warnings.simplefilter("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
online = False # if True: download xml files from github URL
# be careful: online version will not work if requirements from requirements.txt are not satisfied!
if online:
url_link_302_19 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/302-19.xlsx?raw=true'
url_link_01_13_F_Debt_sme_subj = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/01_13_F_Debt_sme_subj.xlsx?raw=true'
url_link_Interpolationexp2 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/Interpolationexp2.xlsx?raw=true'
def extract_data_before_2019y():
"""
Extracts data from the 302-19.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
if online:
return pd.read_excel(url_link_302_19, usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
return pd.read_excel('data_folder/302-19.xlsx', usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
def extract_data_after_2018():
"""
Extracts data from the 01_13_F_Debt_sme_subj.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
# read Задолженность from the page МСП Итого
# .T to make rows for entities and columns for properties
if online:
after_19y_debt = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=1, nrows=1,
sheet_name='МСП Итого ').T
else:
after_19y_debt = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx',
skiprows=1, nrows=1, sheet_name='МСП Итого ').T
after_19y_debt.reset_index(inplace=True)
# remove an odd row after transpose
after_19y_debt.drop(labels=0, axis=0, inplace=True)
after_19y_debt.columns = before_19y.columns[:2]
# change types of the columns for convenience
after_19y_debt[after_19y_debt.columns[0]] = pd.to_datetime(after_19y_debt[after_19y_debt.columns[0]])
after_19y_debt = after_19y_debt.astype({after_19y_debt.columns[1]: 'int32'}, copy=False)
# read Просроченная задолженность from the page МСП в т.ч. просроч.
if online:
after_19y_prosro4eno = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
else:
after_19y_prosro4eno = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx', skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
after_19y_prosro4eno.reset_index(inplace=True)
# remove an odd row after the transpose
after_19y_prosro4eno.drop(labels=0, axis=0, inplace=True)
# name the column
after_19y_prosro4eno.columns = ['Просроченная задолженность']
# concatenate Задолженность and Просроченная задолженность in one table and return it
return
|
pd.concat([after_19y_debt, after_19y_prosro4eno], axis=1)
|
pandas.concat
|
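# Tiny synthetic illustration of the transpose / clean-up / concat pattern used
# in extract_data_after_2018 above; the numbers and column names are invented,
# the real code reads the same shapes from the Excel files.
import pandas as pd

wide = pd.DataFrame([[100, 110, 120]], columns=["2019-01", "2019-02", "2019-03"])
debt = wide.T.reset_index()
debt.columns = ["Date", "Debt"]
overdue = pd.DataFrame({"Overdue": [5, 6, 7]})
print(pd.concat([debt, overdue], axis=1))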
"""
"""
import sys
import argparse
import os
import time
import collections
import re
from six.moves import StringIO
import pandas
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
import shellinford
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"peptides",
metavar="FILE.csv",
help="CSV of mass spec hits")
parser.add_argument(
"reference_csv",
metavar="FILE.csv",
help="CSV of protein sequences")
parser.add_argument(
"reference_index",
metavar="FILE.fm",
help="shellinford index over protein sequences")
parser.add_argument(
"--out",
metavar="OUT.csv",
help="Out file path")
parser.add_argument(
"--flanking-length",
metavar="N",
type=int,
default=15,
help="Length of flanking sequence to include")
parser.add_argument(
"--debug-max-rows",
metavar="N",
type=int,
default=None,
help="Max rows to process. Useful for debugging. If specified an ipdb "
"debugging session is also opened at the end of the script")
def run():
args = parser.parse_args(sys.argv[1:])
df = pandas.read_csv(args.peptides)
df["hit_id"] = "hit." + df.index.map('{0:07d}'.format)
df = df.set_index("hit_id")
print("Read peptides", df.shape, *df.columns.tolist())
reference_df =
|
pandas.read_csv(args.reference_csv, index_col=0)
|
pandas.read_csv
|
import os
import sys
import glob
import pickle as pkl
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel
def load_stratified_prediction_results(results_dir, experiment_descriptor):
"""Load results of stratified prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
# skip compressed files here, use load_compressed* functions
# to load that data separately
if check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_compressed_prediction_results(results_dir,
experiment_descriptor,
old_filenames=False):
"""Load results of compressed prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
old_filenames (bool): use old filename format
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if not check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
if old_filenames:
try:
n_dims = int(results_filename.split('_')[-3].replace('n', ''))
except ValueError:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
else:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['n_dims'] = n_dims
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_purity_results(results_dir, classify=True):
"""Load results of tumor purity experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if classify and ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if not classify and ('regress' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
if check_compressed_file(results_filename):
id_results_df.training_data += '_compressed'
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_msi_results(results_dir):
"""Load results of microsatellite instability prediction experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
# TODO: n_dims?
results_df =
|
pd.concat((results_df, id_results_df))
|
pandas.concat
|
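# Hypothetical usage of the loaders defined above; the directory names and the
# experiment descriptors are placeholders, not paths from the original project.
stratified_df = load_stratified_prediction_results('results/stratified', 'stratified')
compressed_df = load_compressed_prediction_results('results/compressed', 'compressed')
all_results = pd.concat((stratified_df, compressed_df))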
import numpy as np
import pandas as pd
import neurokit2 as nk
signal = nk.signal_simulate(duration=10, sampling_rate=200, frequency=[5, 10], noise=0)
signal = nk.standardize(signal)
# nk.complexity_embedding(signal, delay=20, dimension=3, show=True)
def _compute_complexity(noise=0):
data = pd.DataFrame()
for noise_intensity in np.linspace(0.01, 2, 30):
x = nk.signal_noise(duration=10, sampling_rate=200, beta=noise)
sig = signal + (nk.standardize(x) * noise_intensity)
d, _ = nk.complexity(sig, which=["fast", "medium", "slow"], delay=20, dimension=3)
d = d.drop("ShanEn", axis=1)
d["ShanEn_2"], _ = nk.entropy_shannon(pd.cut(sig, 2, labels=False))
d["ShanEn_3"], _ = nk.entropy_shannon(pd.cut(sig, 3, labels=False))
d["ShanEn_10"], _ = nk.entropy_shannon(pd.cut(sig, 10, labels=False))
d["ShanEn_100"], _ = nk.entropy_shannon(pd.cut(sig, 100, labels=False))
d["ShanEn_1000"], _ = nk.entropy_shannon(pd.cut(sig, 1000, labels=False))
d["CREn_2"], _ = nk.entropy_cumulative_residual(pd.cut(sig, 2, labels=False))
d["CREn_3"], _ = nk.entropy_cumulative_residual(pd.cut(sig, 3, labels=False))
d["CREn_10"], _ = nk.entropy_cumulative_residual(pd.cut(sig, 10, labels=False))
d["CREn_100"], _ = nk.entropy_cumulative_residual(pd.cut(sig, 100, labels=False))
d["CREn_1000"], _ = nk.entropy_cumulative_residual(pd.cut(sig, 1000, labels=False))
d["PFD_a"], _ = nk.fractal_petrosian(sig, method="A")
d["PFD_b"], _ = nk.fractal_petrosian(sig, method="B")
d["PFD_c"] = d["PFD"] # already computed
d["PFD_d"], _ = nk.fractal_petrosian(sig, method="D")
d = d.drop(["PFD"], axis=1)
d["Noise"] = noise
d["Intensity"] = noise_intensity
data = pd.concat([data, d], axis=0)
return data
args = [{"noise": noise} for noise in np.linspace(-2, 2, 10)]
data = nk.parallel_run(_compute_complexity, args, verbose=10, n_jobs=-4) # -2
|
pd.concat(data)
|
pandas.concat
|
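# Generic sketch of the "discretise, then take Shannon entropy" step used above,
# written with plain numpy/pandas instead of NeuroKit2.
import numpy as np
import pandas as pd

sig = np.random.randn(1000)
binned = pd.cut(sig, 10, labels=False)          # 10 equal-width bins
_, counts = np.unique(binned, return_counts=True)
p = counts / counts.sum()
print(-(p * np.log2(p)).sum())                  # Shannon entropy in bits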
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 09:15:15 2019
PROGRAM PURPOSE:
Creates the following graphics for the CTM:
-Table 7: Historical Households and Average Household Sizes for
XXXX County, 1980-2010
-Figure 4: Trend in Average Household Size for XXXX County, 1980-2010
@author: slq584
"""
import pandas as pd
import matplotlib.pyplot as plt
import paths as p
import API_call as api
import population_growth as pop
"""
get_excel_data() PURPOSE:
Get Number of Households and Average Household Size from Excel files
containing data that predates the 2000s
"""
def get_excel_data(data, county, decade):
#Dataframe to hold data
df = pd.DataFrame()
df = pd.read_excel(data)
#Subset the row based on county
subset_row = df[df['COUNTY'] == county]
if(decade == 1980):
#Assign information we are looking for to variables
household_number = subset_row['C75001'].values[0]
household_average = subset_row['Unnamed: 10'].values[0]
household_average = round(household_average, 2)
elif(decade == 1990):
#Assign information we are looking for to variables
household_number = subset_row['EUO001'].values[0]
household_average = subset_row['Unnamed: 12'].values[0]
household_average = round(household_average, 2)
#Package these variables in a Dataframe since we need to return
#more than one value
data_needed = pd.DataFrame([{'Number': household_number,
'Average': household_average}])
return data_needed
"""
table7_string_converter() PURPOSE:
Convert string into a dictionary for later implementation.
This function was made specifically for table 7.
"""
def table7_string_converter(string):
li = list(string.split(","))
#print(li)
li.pop(6)
#print(li)
subli1 = li[:4]
subli2 = li[4:]
conversion = dict(zip(subli1,subli2))
#print(conversion)
return conversion
"""
make_table7() PURPOSE:
Creates table 7 with data passed.
"""
def make_table7(county, county_data_2010, county_data_2000, county_data_2010_number,
county_data_2000_number, county_data_1990, county_data_1980):
#Assign data to variables for readability
household_average_2010 = county_data_2010['H012001']
household_average_2000 = county_data_2000['H012001']
number_2010 = county_data_2010_number['P020001']
number_2000 = county_data_2000_number['P020001']
#Put all data in a list so that the DataFrame declaration is readable
values = [county_data_1980['Number'].values[0], county_data_1980['Average'].values[0],
county_data_1990['Number'].values[0], county_data_1990['Average'].values[0],
number_2000, household_average_2000,
number_2010, household_average_2010,]
#Rename Columns and establish a MultiIndex
columns = pd.MultiIndex.from_product([['1980', '1990', '2000', '2010'],
['Number', 'Avg. HH Size']])
#Create DataFrame
table7 =
|
pd.DataFrame([values], columns=columns)
|
pandas.DataFrame
|
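# Quick check of the two-level column layout built in make_table7 above, with
# placeholder numbers rather than real census values.
import pandas as pd

columns = pd.MultiIndex.from_product([['1980', '1990'], ['Number', 'Avg. HH Size']])
print(pd.DataFrame([[100, 2.5, 120, 2.4]], columns=columns))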
import numpy as np
import pickle as pkl
import scipy.sparse as sp
import torch
import torch.nn as nn
import random
from sklearn import metrics
import pandas as pd
def shuffle_nodes(items, masks):
node_num = masks.sum(axis=1)
batch_size = masks.shape[0]
node_max = masks.shape[1]
shuffle_item = items.copy()
for i in range(batch_size):
shuf_idx = np.append(np.random.permutation(node_num[i]), np.ones(node_max - node_num[i]) * (node_max - 1))
idx = shuf_idx.astype(np.int32)
shuffle_item[i] = shuffle_item[i, idx]
return shuffle_item
###############################################
# This section of code adapted from tkipf/gcn #
###############################################
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def macro_f1(preds, labels):
labels = labels.to(torch.device("cpu")).numpy()
preds = preds.to(torch.device("cpu")).numpy()
macro = metrics.f1_score(labels, preds, average='macro')
return macro
def sparse_to_tuple(sparse_mx, insert_batch=False):
"""Convert sparse matrix to tuple representation."""
"""Set insert_batch=True if you want to insert a batch dimension."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
if insert_batch:
coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()
values = mx.data
shape = (1,) + mx.shape
else:
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def standardize_data(f, train_mask):
"""Standardize feature matrix and convert to tuple representation"""
# standardize data
f = f.todense()
mu = f[train_mask == True, :].mean(axis=0)
sigma = f[train_mask == True, :].std(axis=0)
f = f[:, np.squeeze(np.array(sigma > 0))]
mu = f[train_mask == True, :].mean(axis=0)
sigma = f[train_mask == True, :].std(axis=0)
f = (f - mu) / sigma
return f
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features.todense(), sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
###############################################
# This section of code adapted from DGRec #
###############################################
def load_latest_session(data_path):
ret = []
for line in open(data_path + '/latest_sessions.txt'):
chunks = line.strip().split(',')
ret.append(chunks)
return ret
def load_map(data_path):
id_map = {}
for line in open(data_path):
k, v = line.strip().split(',')
id_map[k] = str(v)
map_num = len(id_map)
return map_num
def load_adj(data_path, dataset="Yelp"):
if dataset == "Yelp":
adj_social = sp.load_npz(data_path + "/meta_path/adj_user.npz")
mp_city = sp.load_npz(data_path + "/meta_path/mp_city.npz")
mp_category = sp.load_npz(data_path + "/meta_path/mp_category.npz")
# return [mp_iui, mp_social_dict, mp_category, mp_city]
adj_social = adj_social.tolil()
return [adj_social, mp_category.tolil(), mp_city.tolil()]
elif dataset == "Tmall":
mp_brand = sp.load_npz(data_path + "/meta_path/mp_brand.npz")
mp_seller = sp.load_npz(data_path + "/meta_path/mp_seller.npz")
mp_cate = sp.load_npz(data_path + "/meta_path/mp_category.npz")
return [mp_seller.tolil(), mp_brand.tolil(), mp_cate.tolil()]
elif dataset == "Nowplaying":
mp_artist = sp.load_npz(data_path + "/meta_path/mp_artist.npz")
mp_hashtag = sp.load_npz(data_path + "/meta_path/mp_hashtag.npz")
mp_context = sp.load_npz(data_path + "/meta_path/mp_context.npz")
return [mp_artist.tolil(), mp_hashtag.tolil(), mp_context.tolil()]
def load_data(path="./Yelp/processed/", dataset="Yelp"):
latest_sessions = load_latest_session(path)
mp_adj_list = load_adj(path, dataset)
mp_test_adj = load_adj(path + '/test', dataset)
# mp_adj_list = mp_test_adj
if dataset == "Yelp":
business_file = path + '/business_id_map.csv'
user_file = path + '/user_id_map.csv'
city_file = path + '/city_id_map.csv'
category_file = path + '/category_id_map.csv'
business_num = load_map(data_path=business_file)
user_num = load_map(data_path=user_file)
city_num = load_map(data_path=city_file)
category_num = load_map(data_path=category_file)
num_list = [business_num, user_num, city_num, category_num]
train = pd.read_csv(path + '/train.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str, 6: str,
7: str})
valid = pd.read_csv(path + '/valid.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str,
6: str, 7: str})
test = pd.read_csv(path + '/test.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str, 6: str,
7: str})
# return adjs, features, labels, idx_train, idx_val, idx_test
elif dataset == "Tmall":
train = pd.read_csv(path + '/train.csv', sep=',', dtype={0: str, 1: int, 2: int, 3: int})
valid = pd.read_csv(path + '/valid.csv', sep=',', dtype={0: str, 1: int, 2: int, 3: int})
test = pd.read_csv(path + '/test.csv', sep=',', dtype={0: str, 1: int, 2: int, 3: int})
df_concat = pd.concat([train, valid, test])
brand_df = pd.read_csv(path + 'item_brand.csv', sep=',', dtype={0: int, 1: int})
seller_df = pd.read_csv(path + 'item_seller.csv', sep=',', dtype={0: int, 1: int})
cate_df = pd.read_csv(path + 'item_category.csv', sep=',', dtype={0: int, 1: int})
business_num = df_concat['item_id'].nunique()
seller_num = seller_df['seller_id'].nunique()
brand_num = brand_df['brand_id'].nunique()
category_num = cate_df['cat_id'].nunique()
num_list = [business_num, seller_num, brand_num, category_num]
elif dataset == 'Nowplaying':
train = pd.read_csv(path + '/train.csv', sep=',', dtype={0: int, 1: int, 2: int, 3: str})
valid = pd.read_csv(path + '/valid.csv', sep=',', dtype={0: int, 1: int, 2: int, 3: str})
test = pd.read_csv(path + '/test.csv', sep=',', dtype={0: int, 1: int, 2: int, 3: str})
df_concat = pd.concat([train, valid, test])
artist_df = pd.read_csv(path + '/artist.csv', sep=',', dtype={0: int, 1: int})
hashtag_df = pd.read_csv(path + '/hashtag.csv', sep=',', dtype={0: int, 1: int})
context_df =
|
pd.read_csv(path + '/context.csv', sep=',', dtype={0: int, 1: int})
|
pandas.read_csv
|
"""
Pull my Garmin sleep data via json requests.
This script was adapted from: https://github.com/kristjanr/my-quantified-sleep
The aforementioned code required the user to manually define
headers and cookies. It also stored all of the data within Night objects.
My modifications include using selenium to drive a Chrome browser. This avoids
the hassle of getting headers and cookies manually (the cookies would have to be updated
every time the Garmin session expired). It also segments data requests because
Garmin will respond with an error if more than 32 days are requested at once. Lastly,
data is stored as a pandas dataframe and then written to a user-defined directory
as a pickle file.
Data is then processed and merged with older data from my Microsoft smartwatch.
The merged data is also saved as pandas dataframes in pickle files.
Lastly, sunrise and sunset data is downloaded for all days in the sleep dataset.
This data is also archived as a pandas dataframe and saved as a pickle file.
The data update process has been broken into steps so that progress can be passed
to the Dash app.
"""
# import base packages
import datetime, json, os, re, sys
from itertools import chain
from os.path import isfile
# import installed packages
import pytz, requests, chardet, brotli
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# input variables
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
else:
# running on heroku server
ENV = "heroku"
if ENV == "local":
proj_path = "C:/Users/adiad/Anaconda3/envs/SleepApp/sleep_app/" # read/write data dir
else:
proj_path = ""
GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google-chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
garmin_results_pkl_fn = "data/garmin_sleep_df.pkl" # name of pickle file to archive (combining new results with any previous Garmin) for easy updating and subsequent processing
garmin_results_json_fn = "data/new_garmin_sleep.json" # name of json file with only new raw results
garmin_results_csv_fn = "data/garmin_sleep_df.csv" # name of csv file to archive (combining new results with any previous)
all_descr_results_fn = "data/all_sleep_descr_df.pkl" # name of pickle file combining all Garmin & Microsoft sleep session description data
all_event_results_fn = "data/all_sleep_event_df.pkl" # name of pickle file combining all Garmin & Microsoft event data
sun_pkl_fn = "data/sun_df.pkl" # name of pickle file to archive sunrise/sunset data
local_tz = "US/Eastern" # pytz local timezone for sunrise/sunset time conversion
sun_lat = 39.76838 # latitude where sunrise/sunset times are derived from
sun_lon = -86.15804 # longitude where sunrise/sunset times are derived from
run_browser_headless = False # will hide Firefox during execution if True
browser_action_timeout = 60 # max time (seconds) for browser wait operations
start_date = '2017-03-01' # first date to pull sleep data
end_date = str(datetime.date.today() - datetime.timedelta(days=1)) # last date to pull sleep data
user_name = "email address" # Garmin username
password = "password" # Garmin password
signin_url = "https://connect.garmin.com/signin/" # Garmin sign-in webpage
sleep_url_base = "https://connect.garmin.com/modern/sleep/" # Garmin sleep base URL (sans date)
sleep_url_json_req = "https://connect.garmin.com/modern/proxy/wellness-service/wellness/dailySleepsByDate"
def download(start_date, end_date, headers, session_id):
params = (
('startDate', start_date),
('endDate', end_date),
('_', session_id),
)
response = requests.get(sleep_url_json_req, headers=headers, params=params)
if response.status_code != 200:
print("RESPONSE ERROR RECEIVED:")
print('Status code: %d' % response.status_code)
response_dict = json.loads(response.content.decode('UTF-8'))
print('Content: %s' % response_dict["message"])
raise Exception
return response
def download_to_json(start_date, end_date, headers, session_id):
response = download(start_date, end_date, headers, session_id)
# most responses are in ascii (no encoding)
# sporadically a response will have brotli encoding
#print("The response is encoded with:", chardet.detect(response.content))
if chardet.detect(response.content)["encoding"] == 'ascii':
return json.loads(response.content)
else:
return json.loads(brotli.decompress(response.content))
def converter(data, return_df=True):
# define functions which pass through None value because
# datetime functions don't accept value None
def sleep_timestamp(val):
if val is None:
return None
else:
return datetime.datetime.fromtimestamp(val / 1000, pytz.utc)
def sleep_timedelta(val):
if val is None:
return None
else:
return datetime.timedelta(seconds=val)
# initialize variables
if return_df:
nights = pd.DataFrame(columns=["Prev_Day", "Bed_Time", "Wake_Time",
"Awake_Dur", "Light_Dur", "Deep_Dur",
"Total_Dur", "Nap_Dur", "Window_Conf"])
i = 0
else:
nights = []
for d in data:
bed_time = sleep_timestamp(d['sleepStartTimestampGMT'])
wake_time = sleep_timestamp(d['sleepEndTimestampGMT'])
previous_day = datetime.date(*[int(datepart) for datepart in d['calendarDate'].split('-')]) - datetime.timedelta(days=1)
deep_duration = sleep_timedelta(d['deepSleepSeconds'])
light_duration = sleep_timedelta(d['lightSleepSeconds'])
total_duration = sleep_timedelta(d['sleepTimeSeconds'])
awake_duration = sleep_timedelta(d['awakeSleepSeconds'])
nap_duration = sleep_timedelta(d['napTimeSeconds'])
window_confirmed = d['sleepWindowConfirmed']
if return_df:
nights.loc[i] = [previous_day, bed_time, wake_time, awake_duration,
light_duration, deep_duration, total_duration,
nap_duration, window_confirmed]
i += 1
else:
night = Night(bed_time, wake_time, previous_day, deep_duration,
light_duration, total_duration, awake_duration)
nights.append(night)
return nights
# this function returns a list of all dates in [date1, date2]
def daterange(date1, date2):
date_ls = [date1]
for n in range(int((date2 - date1).days)):
date_ls.append(date_ls[-1] + datetime.timedelta(days=1))
return date_ls
# steps to updating sleep data:
# Step 0: determine which dates are missing in the archived Garmin dataset,
# given the input start & end dates
# Step 1: Login to connect.garmin.com, get user setting credentials
# Step 2: Using credentials, download missing data from Garmin in json
# Step 3: process new Garmin data, merge it with archived data
# Step 4: download sunrise/sunset data for new dates and merge with archived data
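# A minimal sketch of how these steps chain together (added for illustration; the Dash app is
# assumed to call the steps individually so it can report progress between them, and step4 for
# the sunrise/sunset data is assumed to be defined further below):
def _run_update_sketch():
    msg0, nights_df, new_req_dates_ls = step0()
    if len(new_req_dates_ls) > 0:
        msg1, request = step1()
        msg2, data = step2(request, new_req_dates_ls)
        step3(nights_df, data, new_req_dates_ls)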
def step0():
# make a list of all dates from first sleep date to last (fills any missing dates)
req_dates_ls = daterange(
datetime.datetime.strptime(start_date, "%Y-%m-%d").date(),
datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
)
# Look for previous results
if isfile(proj_path + garmin_results_pkl_fn):
nights_df = pd.read_pickle(proj_path + garmin_results_pkl_fn)
else:
nights_df = pd.DataFrame()
# if previous results were found, reduce requested dates to those not yet obtained
if len(nights_df) > 0:
# get list of requested dates not yet obtained
archive_dates_ls = list(nights_df["Prev_Day"])
new_req_dates_ls = np.setdiff1d(req_dates_ls, archive_dates_ls)
else:
new_req_dates_ls = req_dates_ls
#print("Archive max: ", max(archive_dates_ls))
#print("Request max: ", max(req_dates_ls))
if len(new_req_dates_ls) == 0:
msg = "Archived data is up to date, no new data is available"
else:
msg = "Current data was checked and " + str(len(new_req_dates_ls)) + " night(s) are needed"
return [msg, nights_df, new_req_dates_ls]
def step1():
opts = webdriver.ChromeOptions()
opts.add_argument('--disable-gpu')
opts.add_argument('--no-sandbox')
opts.add_argument('--disable-dev-shm-usage')
if ENV == "local":
if run_browser_headless:
opts.addArgument("headless")
assert opts.headless # Operating in headless mode
else:
opts.binary_location = GOOGLE_CHROME_PATH
# open Chrome and go to Garmin's sign-in page
print("Opening Chrome browser")
driver = webdriver.Chrome(chrome_options=opts)
driver.get(signin_url)
# wait until sign-in fields are visible
wait = WebDriverWait(driver, browser_action_timeout)
wait.until(ec.frame_to_be_available_and_switch_to_it(("id","gauth-widget-frame-gauth-widget")))
wait.until(ec.presence_of_element_located(("id","username")))
# write login info to fields, then submit
print("Signing in to connect.garmin.com")
element = driver.find_element_by_id("username")
driver.implicitly_wait(5)
element.send_keys(user_name)
element = driver.find_element_by_id("password")
element.send_keys(password)
element.send_keys(Keys.RETURN)
wait.until(ec.url_changes(signin_url)) # wait until landing page is requested
driver.switch_to.default_content() # get out of iframe
# get dummy webpage to obtain all request headers
print("Loading dummy page to obtain headers")
driver.get(sleep_url_base + start_date)
request = driver.wait_for_request(sleep_url_base + start_date,
timeout=browser_action_timeout)
if (request.response.status_code != 200) or (not hasattr(request, "headers")):
print("RESPONSE ERROR RECEIVED:")
if (request.response.status_code != 200):
print("Status code: %d" % request.response.status_code)
#response_dict = json.loads(request.content.decode('UTF-8'))
print("Reason: ", request.response.reason)
if not hasattr(request, "headers"):
print("Request did not have 'headers' attribute")
print("Request attributes: ", dir(request))
print("Request headers: ", request.headers)
#raise Exception
# close the Chrome browser
driver.close()
msg = "Logged in to connect.garmin.com"
return [msg, request]
def step2(request, new_req_dates_ls):
# transfer request headers
headers = {
"cookie": request.headers["Cookie"],
"referer": sleep_url_base + start_date,
"accept-encoding": request.headers["Accept-Encoding"],
"accept-language": "en-US", # request.headers["Accept-Language"],
"user-agent": request.headers["User-Agent"],
#"nk": "NT",
"accept": request.headers["Accept"],
"authority": request.headers["Host"],
#"x-app-ver": "4.25.3.0",
"upgrade-insecure-requests": request.headers["Upgrade-Insecure-Requests"]
}
# get the session id from the headers
re_session_id = re.compile(r"(?<=\$ses_id:)(\d+)")
session_id = re_session_id.search(str(request.headers)).group(0)
# Garmin will throw error if request time span exceeds 32 days
# therefore, request 32 days at a time
max_period_delta = datetime.timedelta(days=31)
data = [] # list of jsons, one per time period
get_dates_ls = new_req_dates_ls
while len(get_dates_ls) > 0:
period_start = min(get_dates_ls)
if (max(get_dates_ls) - period_start) > (max_period_delta - datetime.timedelta(days=1)):
period_end = period_start + max_period_delta
else:
period_end = max(get_dates_ls)
# note, this may request some dates which were already obtained
# since a contiguous period is being requested rather than 32 new dates
# duplicated dates will be dropped later
print("Getting data for period: [%s, %s]" % (period_start, period_end))
data.append(download_to_json(period_start, period_end, headers, session_id))
# trim dates list
get_dates_ls = [d for d, s in zip(get_dates_ls, np.array(get_dates_ls) > period_end) if s]
# combine list of jsons into one large json
data = list(chain.from_iterable(data))
# save raw Garmin json to project folder
with open(proj_path + garmin_results_json_fn, 'w') as fp:
json.dump(data, fp)
msg = "Data has been downloaded from Garmin"
return [msg, data]
def step3(nights_df, data, new_req_dates_ls):
# clean the new garmin data
new_nights_df = converter(data)
new_nights_df["Prev_Day"] = pd.to_datetime(new_nights_df["Prev_Day"])
if pd.to_datetime(new_nights_df["Bed_Time"]).dt.tz is None:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_convert(local_tz)
if pd.to_datetime(new_nights_df["Wake_Time"]).dt.tz is None:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_convert(local_tz)
new_nights_df["Light_Dur"] = pd.to_timedelta(new_nights_df["Light_Dur"], "days")
new_nights_df["Deep_Dur"] = pd.to_timedelta(new_nights_df["Deep_Dur"], "days")
new_nights_df["Total_Dur"] = pd.to_timedelta(new_nights_df["Total_Dur"], "days")
new_nights_df["Nap_Dur"] = pd.to_timedelta(new_nights_df["Nap_Dur"], "days")
# fill df with missing dates so that subsequent updates won't keep
# requesting data which Garmin doesn't have
new_missing_dates_ls = np.setdiff1d(new_req_dates_ls, new_nights_df["Prev_Day"].dt.date)
new_missing_row = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, np.NAN]
for d in new_missing_dates_ls:
new_nights_df.loc[len(new_nights_df)] = [d] + new_missing_row
# drop any nights which were already in the archived pickle file,
# then merge it with archived data
if len(nights_df) > 0:
new_nights_df = new_nights_df[~new_nights_df["Prev_Day"].isin(nights_df["Prev_Day"])]
nights_df = nights_df.append(new_nights_df, sort=True).sort_values("Prev_Day", axis=0)
else:
nights_df = new_nights_df.sort_values("Prev_Day", axis=0)
# trim most recent nights which have NaT durations because they were likely caused
# by the smartwatch not yet having synced with Garmin for those dates
unknown_nights_ls = []
i = 1
while pd.isnull(nights_df.Total_Dur.iloc[-i]) & (len(nights_df) >= i):
unknown_nights_ls.append(nights_df.Prev_Day.iloc[-i])
i += 1
nights_df = nights_df[~nights_df["Prev_Day"].isin(unknown_nights_ls)]
# save merged results
#nights_df.to_csv(proj_path + garmin_results_csv_fn)
nights_df.to_pickle(proj_path + garmin_results_pkl_fn)
# clean garmin data for dashboard
garmin_df = nights_df.drop(["Nap_Dur", "Window_Conf"], axis=1)
# calculate time of day in decimal hours of each event (asleep & wake)
garmin_df["Bed_ToD"] = garmin_df["Bed_Time"].dt.hour + garmin_df["Bed_Time"].dt.minute/60
garmin_df["Bed_ToD"] -= 24*(garmin_df["Bed_ToD"] > 12) # make PM bed times negative
garmin_df["Wake_ToD"] = garmin_df["Wake_Time"].dt.hour + garmin_df["Wake_Time"].dt.minute/60
# read & wrangle old microsoft sleep data
ms2015_df = pd.read_csv(proj_path + "data/Activity_Summary_20150101_20151231.csv")
ms2016_df = pd.read_csv(proj_path + "data/Activity_Summary_20160101_20161231.csv")
ms2017_df = pd.read_csv(proj_path + "data/Activity_Summary_20170101_20171231.csv")
ms_df = ms2015_df.append(ms2016_df).append(ms2017_df, sort=True). \
query("Event_Type == 'Sleep'")
ms2_df = pd.DataFrame()
# create microsoft dataframe which mimics the garmin dataframe
ms2_df["Prev_Day"] = pd.to_datetime(ms_df["Date"])
ms2_df["Bed_Time"] = pd.to_datetime(ms_df["Start_Time"]). \
dt.tz_localize("US/Eastern", ambiguous="NaT")
for i_row in range(len(ms2_df)-1):
# fell asleep after midnight, adjust Prev_Day back 1 day
if ms2_df.iloc[i_row, 1].hour < 12:
ms2_df.iloc[i_row, 0] -= datetime.timedelta(days=1)
ms2_df["Wake_Time"] = pd.to_datetime(ms_df["Wake_Up_Time"]). \
dt.tz_localize("US/Eastern", ambiguous="NaT")
ms2_df["Light_Dur"] = pd.to_timedelta(ms_df["Seconds_Asleep_Light"], "seconds")
ms2_df["Deep_Dur"] = pd.to_timedelta(ms_df["Seconds_Asleep_Restful"], "seconds")
ms2_df["Total_Dur"] = pd.to_timedelta(ms_df["Seconds_Awake"], "seconds") \
+ ms2_df["Light_Dur"] + ms2_df["Deep_Dur"]
ms2_df["Bed_ToD"] = ms2_df["Bed_Time"].dt.hour \
+ ms2_df["Bed_Time"].dt.minute/60
ms2_df["Bed_ToD"] -= 24*(ms2_df["Bed_ToD"] > 12) # make PM bed times negative
ms2_df["Wake_ToD"] = ms2_df["Wake_Time"].dt.hour \
+ ms2_df["Wake_Time"].dt.minute/60
brief_sleep_bool = ms2_df["Total_Dur"] < pd.Timedelta(4, unit="h")
daytime_asleep_bool = (ms2_df["Bed_ToD"] < -3) | (ms2_df["Bed_ToD"] > 7)  # bed time between 7 AM and 9 PM
unknown_dur_bool = pd.isnull(ms2_df["Total_Dur"])
nap_bool = brief_sleep_bool & daytime_asleep_bool
ms3_df = ms2_df.loc[~nap_bool & ~unknown_dur_bool, :]
# combine garmin and microsoft data
all_df = garmin_df.append(ms3_df, sort=True)
all_df["Prev_Day"] =
|
pd.to_datetime(all_df["Prev_Day"])
|
pandas.to_datetime
|
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
from sklearn.metrics import mean_squared_error as mse
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
print(sm.datasets.sunspots.NOTE)
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]
# In[11]:
dta.head()
# In[2]:
dateparse = lambda dates: pd.to_datetime(dates, format='%Y-%m-%d')
dta2 = pd.read_csv('sampled_ts_train.csv', parse_dates=['tran_date'], index_col='tran_date',date_parser=dateparse)
dta2 = dta2.drop(['store_key', 'sku_key', 'selling_price', 'avg_discount', 'store_region', 'store_grading', 'sku_department', 'sku_subdepartment', 'sku_category', 'sku_subcategory'], axis = 1)
dta2.head()
# In[3]:
dta2.plot(figsize=(30,30))
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
# In[4]:
#dta2.sales = dta2.sales.astype(int32)
dta2['sales'] = dta2['sales'].astype('float64')
# In[5]:
dta2.info()
# In[6]:
#split into test and train
percentage = 0.66
series = dta2['sales'].tolist()
size = int(len(series) * percentage)
train, test = series[0:size], series[size:len(series)]
model = ARIMA(train , order = (9,0,0))
model_fit = model.fit()
# In[7]:
from statsmodels.tsa.stattools import acf, pacf
acf_1 = acf(series)[1:20]
plt.plot(acf_1)
test_df = pd.DataFrame([acf_1]).T
test_df.columns = ["Pandas Autocorrelation"]
test_df.index += 1
test_df.plot(kind='bar')
pacf_1 = pacf(series)[1:20]
plt.plot(pacf_1)
plt.show()
test_df = pd.DataFrame([pacf_1]).T
test_df.columns = ['Pandas Partial Autocorrelation']
test_df.index += 1
test_df.plot(kind='bar')
#from the figures we conclude that it is an AR process with a lag of 8-9
# In[12]:
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout
from sklearn import preprocessing
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers.recurrent import LSTM
# In[ ]:
"""
Arima Rolling Forecast
"""
predicted1, resid_test = [], []
history = train
for t in range(len(test)):
model = ARIMA(history, order=(9,0,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
resid_test.append(test[t] - output[0])
predicted1.append(yhat)
obs = test[t]
history.append(obs)
print('predicted=%f, expected=%f' % (yhat, obs))
test_resid = []
for i in resid_test:
test_resid.append(i[0])
error = mean_squared_error(test, predicted1)
print('Test MSE: %.3f' % error)
plt.plot(test)
plt.plot(predicted1, color='red')
plt.show()
# In[ ]:
"""
Residual Diagnostics
"""
train, test = series[0:size], series[size:len(series)]
model = ARIMA(train, order=(9,0,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
print(residuals.describe())
#plot the acf for the residuals
acf_1 = acf(model_fit.resid)[1:20]
plt.plot(acf_1)
test_df = pd.DataFrame([acf_1]).T
test_df.columns = ["Pandas Autocorrelation"]
test_df.index += 1
test_df.plot(kind='bar')
#from the acf obtained from the residuals we conclude that
#there is still a nonlinear relationship among the residuals
# In[ ]:
"""
Hybrid Model
"""
window_size = 50
def make_model(window_size):
model = Sequential()
model.add(Dense(50, input_dim=window_size, init="uniform",
activation="tanh"))
model.add(Dense(25, init="uniform", activation="tanh"))
model.add(Dense(1))
model.add(Activation("linear"))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
#def make_lstm_model():
# model = Sequential()
# model.add(LSTM(
# input_dim=1,
# output_dim=50,
# return_sequences=True))
# model.add(Dropout(0.2))
# model.add(LSTM(
# 100,
# return_sequences=False))
# model.add(Dropout(0.2))
# model.add(Dense(
# output_dim=1))
# model.add(Activation("linear"))
# model.compile(loss="mse", optimizer="rmsprop")
# return model
model = make_model(50)
#lstm_model = make_lstm_model()
min_max_scaler = preprocessing.MinMaxScaler()
train = np.array(train).reshape(-1,1)
train_scaled = min_max_scaler.fit_transform(train)
train_X,train_Y = [],[]
for i in range(0 , len(train_scaled) - window_size):
train_X.append(train_scaled[i:i+window_size])
train_Y.append(train_scaled[i+window_size])
new_train_X,new_train_Y = [],[]
for i in train_X:
new_train_X.append(i.reshape(-1))
for i in train_Y:
new_train_Y.append(i.reshape(-1))
new_train_X = np.array(new_train_X)
new_train_Y = np.array(new_train_Y)
#new_train_X = np.reshape(new_train_X, (new_train_X.shape[0], new_train_X.shape[1], 1))
model.fit(new_train_X,new_train_Y, nb_epoch=500, batch_size=512, validation_split = .05)
# In[ ]:
test_extended = train.tolist()[-1*window_size:] + test_resid
test_data = []
for i in test_extended:
try:
test_data.append(i[0])
except:
test_data.append(i)
test_data = np.array(test_data).reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
test_scaled = min_max_scaler.fit_transform(test_data)
test_X,test_Y = [],[]
for i in range(0 , len(test_scaled) - window_size):
test_X.append(test_scaled[i:i+window_size])
test_Y.append(test_scaled[i+window_size])
new_test_X,new_test_Y = [],[]
for i in test_X:
new_test_X.append(i.reshape(-1))
for i in test_Y:
new_test_Y.append(i.reshape(-1))
new_test_X = np.array(new_test_X)
new_test_Y = np.array(new_test_Y)
#new_test_X = np.reshape(new_test_X, (new_test_X.shape[0], new_test_X.shape[1], 1))
predictions = model.predict(new_train_X)
predictions_rescaled=min_max_scaler.inverse_transform(predictions)
Y = pd.DataFrame(new_train_Y)
pred =
|
pd.DataFrame(predictions)
|
pandas.DataFrame
|
# coding: utf-8 -*-
'''
GSI.py contains utility functions for GSI
'''
__all__ = ['GSIstat']
import numpy as _np
import pandas as _pd
import re as _re
class GSIstat(object):
'''
Object containing the GSI statistics
'''
def __init__(self,filename,adate):
'''
Initialize the GSIstat object
INPUT:
filename = filename of the gsistat file
adate = analysis date
OUTPUT:
GSIstat: object containing the contents of the filename
'''
self.filename = filename
self.analysis_date = adate
fh = open(self.filename,'r')
self._lines = fh.readlines() # Keep lines private
fh.close()
# Initialize cache for fast parsing
self._cache = {}
return
def extract(self,name):
'''
From the gsistat file, extract information:
INPUT:
name = information sought
Valid options are:
ps, oz, uv, t, q, gps, rad, cost
OUTPUT:
df = dataframe containing information
'''
# If name has already been parsed,
# just return it from cache
if name in self._cache:
df = self._cache[name]
return df
if name in ['ps']:
df = self._get_ps()
elif name in ['oz']:
df = self._get_ozone()
elif name in ['uv','t','q','gps']:
df = self._get_conv(name)
elif name in ['rad']:
df = self._get_radiance()
elif name in ['cost']:
df = self._get_cost()
else:
raise IOError('option %s is not defined' % name)
# Drop the o-g from the indices list
if 'o-g' in list(df.index.names):
df.reset_index(level='o-g',drop=True,inplace=True)
# Add datetime index
df = self._add_datetime_index(df)
# Cache it for faster access
self._cache[name] = df
return df
def _add_datetime_index(self,df):
'''
Add the datetime as the first index
INPUT:
df = dataframe without datetime index
OUTPUT:
df = dataframe with datetime as the 1st index
'''
# If date is already present, return
if 'date' in list(df.index.names):
return df
indices = ['date'] + list(df.index.names)
df['date'] = self.analysis_date
df.set_index('date', append=True, inplace=True)
df = df.reorder_levels(indices)
return df
def extract_instrument(self,obtype,instrument):
'''
From the gsistat file, extract detailed information on an instrument:
INPUT:
obtype = observation type to extract (rad or oz)
instrument = instrument name [must be in the observation type]
E.g.:
amsua, mhs, iasi, hirs, etc
OUTPUT:
df = dataframe containing information
'''
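        # e.g. gsi.extract_instrument('rad', 'amsua') returns per-channel AMSU-A statistics
        # (illustrative call, assuming `gsi` is a GSIstat instance)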
# If instrument has already been parsed,
# just return it from cache
if instrument in self._cache:
df = self._cache[instrument]
return df
# Ensure obtype is already called,
# if not call it and cache it
if obtype in list(self._cache.keys()):
otype = self._cache[obtype]
else:
otype = self.extract(obtype)
self._cache[obtype] = otype
instruments = sorted(otype.index.get_level_values('instrument').unique())
satellites = sorted(otype.index.get_level_values('satellite' ).unique())
if instrument not in instruments:
print('Instrument %s not found!' % instrument)
print('%s contains ...' % self.filename)
print(', '.join(str(x) for x in instruments))
return None
# Handle special instruments
if instrument in ['iasi','iasi616']:
inst = 'iasi616'
elif instrument in ['airs','airs281SUBSET']:
inst = 'airs281SUBSET'
else:
inst = instrument
tmp = []
pattern = r'\s+\d+\s+\d+\s+%s_\S+\s+\d+\s+\d+\s+' % (inst)
for line in self._lines:
if _re.match(pattern,line):
tst = line.strip().split()
tst = tst[:2] + tst[2].split('_') + tst[3:]
tmp.append(tst)
columns = ['it','channel','instrument','satellite','nassim','nrej','oberr','OmF_bc','OmF_wobc','col1','col2','col3']
df = _pd.DataFrame(data=tmp,columns=columns)
df.drop(['col1','col2','col3'],inplace=True,axis=1)
df[['channel','nassim','nrej']] = df[['channel','nassim','nrej']].astype(int)
df[['oberr','OmF_bc','OmF_wobc']] = df[['oberr','OmF_bc','OmF_wobc']].astype(float)
# Since iteration number is not readily available, make one
lendf = len(df)
nouter = lendf // len(df['it'].unique())
douter = lendf // nouter
it = _np.zeros(lendf,dtype=int)
for i in range(nouter):
its = douter * i
ite = douter * (i+1)
it[its:ite] = i+1
df['it'] = it
df = df[['it','instrument','satellite','channel','nassim','nrej','oberr','OmF_bc','OmF_wobc']]
df.set_index(['it','instrument','satellite','channel'],inplace=True)
return df
# Surface pressure Fit
def _get_ps(self):
'''
Search for surface pressure
'''
pattern = r'obs\s+type\s+stype\s+count'
for line in self._lines:
if _re.search(pattern,line):
header = 'o-g ' + line.strip()
break
tmp = []
pattern = r' o-g (\d\d) %7s' % ('ps')
for line in self._lines:
if _re.match(pattern,line):
# don't add monitored or rejected data
if any(x in line for x in ['mon','rej']):
continue
tmp.append(line.strip().split())
columns = header.split()
df =
|
_pd.DataFrame(data=tmp,columns=columns)
|
pandas.DataFrame
|
from datetime import datetime, timedelta
from io import BytesIO, StringIO
import pandas as pd
from django.conf import settings
from django.core.mail.message import EmailMessage
from django.db.models import Q
from django.template.loader import get_template
from django.utils import timezone
from api import email
from api.models import PaymentFile
from api.s3util import save_to_s3_payment, save_to_s3_daily_sales_report
from api.utils import to_int, get_or_none
from employee.models import TaskEmail
from restapi.service.tallyintegration import bank_transfer_payments
from team.models import OutWardPayment, ManualBooking
def create_payment_file(new_file_name, date, content, summary=False):
pf = get_or_none(PaymentFile, name=new_file_name)
if pf:
pf.upload.delete_from_s3()
pf.upload.delete()
pf.delete()
s3_upload = save_to_s3_payment(new_file_name, content)
pf = PaymentFile.objects.create(upload=s3_upload, date=date, name=new_file_name, summary=summary)
dl_url = s3_upload.public_url()
return dl_url
def send_payment_file_email(filename, link, to):
subject = 'Payment File %s' % filename
text_body = 'Visit this link to download the file - %s' % link
html_body = 'Click <a href="%s" download="%s">here</a> to download the file' % (link, filename)
email.send(subject, body=text_body, html=html_body, to=to)
def get_today_payments():
outward = OutWardPayment.objects.filter(payment_date__lte=datetime.now().today()).exclude(
bank_account=None).exclude(status__in=['paid', 'reconciled'])
if outward.exists():
bank_transfer_payments(payments=outward)
payment_list = []
for payment in outward:
data = []
if payment.payment_mode == 'neft':
mode = 'N'
elif payment.payment_mode == 'rtgs':
mode = 'R'
elif payment.payment_mode == 'imps':
mode = 'M'
elif payment.payment_mode == 'hdfc_internal_account':
mode = 'I'
else:
mode = 'I'
data.append(mode)
data.append(payment.bank_account.beneficiary_code[:14] if payment.bank_account else '')
data.append(payment.bank_account.account_number if payment.bank_account else '')
data.append(str(to_int(payment.actual_amount)))
data.append(payment.bank_account.account_holder_name if payment.bank_account else '')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
if payment.booking_id.exclude(lr_numbers=None).exists():
booking = payment.booking_id.first()
lr_number = booking.lr_numbers.all().first().lr_number
narration = '{}OW{}'.format(lr_number[:12], str(payment.id).zfill(6))
else:
booking = payment.booking_id.first()
narration = '{}OW{}'.format(booking.booking_id[:12], str(payment.id).zfill(6))
data.append(narration[:20])
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append('')
data.append(str(payment.payment_date.strftime('%d/%m/%Y')))
data.append('')
data.append(payment.bank_account.ifsc if payment.bank_account else '')
data.append('')
data.append('')
data.append('')
payment_list.append(data)
payment.status = 'paid'
payment.save()
df = pd.DataFrame(payment_list)
string_io = StringIO()
df.to_csv(string_io, index=False, header=False)
content = string_io.getvalue() or '\n'
date = timezone.now().date()
new_file_name = PaymentFile.get_next_file_name(date=date)
dl_url = create_payment_file(new_file_name, date, content)
task = TaskEmail.objects.get(task_id=4, office_id=1)
email_id_list = list(task.employee.values_list('username__profile__email', flat=True))
send_payment_file_email(filename=new_file_name, link=dl_url, to=email_id_list)
return dl_url
return None
def send_sales_report():
mb = ManualBooking.objects.filter(
created_on__date=datetime.now().date() - timedelta(days=1)
).exclude(
booking_status='cancelled'
).order_by(
'-shipment_date'
)
if mb:
data = []
for booking in mb:
try:
data.append([
booking.booking_id,
'\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
booking.from_city,
booking.to_city,
booking.party_rate,
booking.charged_weight,
booking.total_amount_to_company,
booking.supplier_rate,
booking.supplier_charged_weight,
booking.total_amount_to_owner,
booking.total_amount_to_company - booking.total_amount_to_owner,
"{0:.2f}".format(((
booking.total_amount_to_company - booking.total_amount_to_owner) / booking.total_amount_to_owner) * 100)
])
except ZeroDivisionError:
data.append([
booking.booking_id,
'\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
booking.from_city,
booking.to_city,
booking.party_rate,
booking.charged_weight,
booking.total_amount_to_company,
booking.supplier_rate,
booking.supplier_charged_weight,
booking.total_amount_to_owner,
booking.total_amount_to_company - booking.total_amount_to_owner,
'Amount to owner is zero'
])
df = pd.DataFrame(data=data,
columns=['Booking ID', 'LR Number(s)', 'From City', 'To City', 'Party Rate',
'Party Weight',
'Party Amount', 'Supplier Rate', 'Supplier Weight', 'Supplier Amount', 'Profit',
'% Profit'])
string_io = StringIO()
df.to_csv(string_io, index=False, header=False)
content = string_io.getvalue() or '\n'
filename = datetime.now().strftime('%d%b%Y%I%M') + '.csv'
s3_upload = save_to_s3_daily_sales_report(filename, content)
s3_url = s3_upload.public_url()
subject = '[Aaho] Daily Sales Report for ' + (datetime.now().date() - timedelta(days=1)).strftime(
'%d-%b-%Y')
body = get_template('team/emails/last-day-bookings.html').render(context={'mb': mb, 's3_url': s3_url})
email = EmailMessage(subject, body,
to=['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>'])
email.content_subtype = 'html'
if settings.ENABLE_MAIL and not settings.TESTING:
email.send()
return s3_url
return None
def send_payment_summary_email(filename, link, to):
subject = 'Payment Summary File %s' % filename
text_body = 'Visit this link to download the file - %s' % link
html_body = 'Click <a href="%s" download="%s">here</a> to download the file' % (link, filename)
email.send(subject, body=text_body, html=html_body, to=to)
def payment_summary_today():
outward = OutWardPayment.objects.filter(payment_date__lte=datetime.now().today()).exclude(
bank_account=None).exclude(status='unpaid')
data = []
for value in outward:
temp = []
booking_id = ', '.join(list(value.booking_id.values_list('booking_id', flat=True)))
lr_number = ''
bookings = value.booking_id.all()
for lr in bookings:
lr_number += ', '.join(lr.lr_numbers.values_list('lr_number', flat=True))
temp.append(booking_id)
temp.append(lr_number)
temp.append(value.bank_account.account_holder_name)
temp.append(str(value.actual_amount))
data.append(temp)
df =
|
pd.DataFrame(data, columns=['BOOKING ID', 'LR NUMBER', 'BENEFICIARY NAME', 'AMOUNT'])
|
pandas.DataFrame
|
import composeml as cp
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE
)
from featuretools.entityset import EntitySet, Timedelta
from featuretools.exceptions import UnusedPrimitiveWarning
from featuretools.primitives import (
GreaterThanScalar,
Max,
Mean,
Min,
Sum,
make_agg_primitive,
make_trans_primitive
)
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
@pytest.fixture
def datetime_es():
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5],
"card_id": [1, 1, 5, 1, 5],
"transaction_time": pd.to_datetime([
'2011-2-28 04:00', '2012-2-28 05:00',
'2012-2-29 06:00', '2012-3-1 08:00',
'2014-4-1 10:00']),
"fraud": [True, False, False, False, True]})
datetime_es = EntitySet(id="fraud_data")
datetime_es = datetime_es.add_dataframe(
dataframe_name="transactions",
dataframe=transactions_df,
index="id",
time_index="transaction_time")
datetime_es = datetime_es.add_dataframe(
dataframe_name="cards",
dataframe=cards_df,
index="id")
datetime_es = datetime_es.add_relationship("cards", "id", "transactions", "card_id")
datetime_es.add_last_time_indexes()
return datetime_es
def test_passing_strings_to_logical_types_dfs():
teams = pd.DataFrame({
'id': range(3),
'name': ['Breakers', 'Spirit', 'Thorns']
})
games = pd.DataFrame({
'id': range(5),
'home_team_id': [2, 2, 1, 0, 1],
'away_team_id': [1, 0, 2, 1, 0],
'home_team_score': [3, 0, 1, 0, 4],
'away_team_score': [2, 1, 2, 0, 0]
})
dataframes = {'teams': (teams, 'id', None, {'name': 'natural_language'}), 'games': (games, 'id')}
relationships = [('teams', 'id', 'games', 'home_team_id')]
features = dfs(dataframes, relationships, target_dataframe_name="teams", features_only=True)
name_logical_type = features[0].dataframe['name'].ww.logical_type
assert isinstance(name_logical_type, NaturalLanguage)
def test_accepts_cutoff_time_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_warns_cutoff_time_dask(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
cutoff_times_df = dd.from_pandas(cutoff_times_df, npartitions=2)
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
def test_accepts_cutoff_time_compose(dataframes, relationships):
def fraud_occured(df):
return df['fraud'].any()
lm = cp.LabelMaker(
target_dataframe_name='card_id',
time_index='transaction_time',
labeling_function=fraud_occured,
window_size=1
)
transactions_df = to_pandas(dataframes['transactions'][0])
labels = lm.search(
transactions_df,
num_examples_per_instance=-1
)
labels['time'] = pd.to_numeric(labels['time'])
labels.rename({'card_id': 'id'}, axis=1, inplace=True)
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="cards",
cutoff_time=labels)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features) + 1
def test_accepts_single_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=20)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_accepts_no_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
instance_ids=[1, 2, 3, 5, 6])
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_ignores_instance_ids_if_cutoff_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_approximate_features(pd_dataframes, relationships):
# TODO: Update to use Dask dataframes when issue #985 is closed
cutoff_times_df = pd.DataFrame({"instance_id": [1, 3, 1, 5, 3, 6],
"time": [11, 16, 16, 26, 17, 22]})
# force column to BooleanNullable
pd_dataframes['transactions'] += ({'fraud': "BooleanNullable"},)
feature_matrix, features = dfs(dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
approximate=5,
cutoff_time_in_index=True)
direct_agg_feat_name = 'cards.PERCENT_TRUE(transactions.fraud)'
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features)
truth_values = pd.Series(data=[1.0, 0.5, 0.5, 1.0, 0.5, 1.0])
assert (feature_matrix[direct_agg_feat_name] == truth_values.values).all()
def test_all_columns(pd_dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids,
agg_primitives=[Max, Mean, Min, Sum],
trans_primitives=[],
groupby_trans_primitives=["cum_sum"],
max_depth=3,
allowed_paths=None,
ignore_dataframes=None,
ignore_columns=None,
seed_features=None)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_features_only(dataframes, relationships):
if len(dataframes['transactions']) > 3:
dataframes['transactions'][3]['fraud'] = "BooleanNullable"
else:
dataframes['transactions'] += ({'fraud': "BooleanNullable"},)
features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
features_only=True)
# pandas creates 11 features
# dask creates 10 features (no skew)
# koalas creates 9 features (no skew, no percent_true)
if isinstance(dataframes['transactions'][0], pd.DataFrame):
expected_features = 11
elif isinstance(dataframes['transactions'][0], dd.DataFrame):
expected_features = 10
else:
expected_features = 9
assert len(features) == expected_features
def test_accepts_relative_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions")
feature_matrix_2, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"))
feature_matrix_3, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window=Timedelta("3 months"))
feature_matrix_4, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window="3 months")
assert (feature_matrix.index == [1, 2, 3, 4, 5]).all()
assert (feature_matrix_2.index == [1, 2, 3, 4]).all()
assert (feature_matrix_3.index == [2, 3, 4]).all()
assert (feature_matrix_4.index == [2, 3, 4]).all()
# Test case for leap years
feature_matrix_5, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=
|
pd.Timestamp("2012-2-29 04:00")
|
pandas.Timestamp
|
from typing import List
import pandas as pd
from pyjackson.core import ArgList, Field
from pyjackson.decorators import cached_property
from pyjackson.errors import DeserializationError, SerializationError
from ebonite.core.analyzer.base import TypeHookMixin
from ebonite.core.analyzer.dataset import DatasetHook
from ebonite.core.objects.dataset_type import DatasetType, LibDatasetTypeMixin
class PandasHook(TypeHookMixin, DatasetHook):
"""
:class:`.DatasetHook` implementation for `pandas.DataFrame` which uses :class:`DataFrameType`
"""
valid_types = [pd.DataFrame]
def process(self, obj, **kwargs) -> DatasetType:
return DataFrameType(list(obj.columns))
class SeriesType(LibDatasetTypeMixin):
"""
:class:`.DatasetType` implementation for `pandas.Series` objects which stores them as built-in Python dicts
:param columns: list of columns names in dataset
"""
real_type = pd.Series
libraries = [pd]
def __init__(self, columns: List[str]):
self.columns = columns
def deserialize(self, obj):
return pd.Series(obj)
def serialize(self, instance: pd.Series):
return instance.to_dict()
def get_spec(self):
return [Field(c, float, False) for c in self.columns] # TODO typing
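# Minimal usage sketch (illustrative, not part of the library code):
# st = SeriesType(['a', 'b'])
# st.serialize(pd.Series({'a': 1.0, 'b': 2.0}))  # -> {'a': 1.0, 'b': 2.0}
# st.deserialize({'a': 1.0, 'b': 2.0})           # -> pd.Series indexed by ['a', 'b']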
class DataFrameType(LibDatasetTypeMixin):
"""
:class:`.DatasetType` implementation for `pandas.DataFrame` objects which stores them as
built-in Python dicts with the only key `values` and value in a form of records list.
:param columns: list of columns names in dataset
"""
real_type = pd.DataFrame
libraries = [pd]
def __init__(self, columns: List[str]):
self.columns = columns
def deserialize(self, obj):
self._check_type(obj, dict, DeserializationError)
try:
ret =
|
pd.DataFrame.from_records(obj['values'])
|
pandas.DataFrame.from_records
|
import pandas as pd
import os
import numpy as np
os.chdir("D:\Luyu\COTA")
st =
|
pd.read_csv("stops_full.csv")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import os.path
import datetime as dt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import plotly.offline as offline
from matplotlib import style
# Global variables
outputDirPrefix = "../Plots/"
outputDirExtras = outputDirPrefix + "Extras/"
outputDirMonthsYearEachState = outputDirPrefix + "Months Year Each State/"
outputDirMonthsEachYear = outputDirPrefix + "Months Each Years/"
outputDirChoropleth = outputDirPrefix + "Plotly's HTML Choropleth/"
outputDirTop3State = outputDirExtras + "Top 3 State/"
datasetNameTrain = "../Dataset/clean_and_relevant_training.csv"
datasetNameTest = "../Dataset/clean_and_relevant_testing.csv"
pollutant_type = ["CO", "NO2", "O3", "SO2"]
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'Florida': 'FL',
'Georgia': 'GA',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY',
}
for path in [outputDirPrefix, outputDirExtras, outputDirMonthsYearEachState, outputDirMonthsEachYear,
outputDirChoropleth, outputDirTop3State, outputDirChoropleth+"CO", outputDirChoropleth+"NO2",
outputDirChoropleth+"O3", outputDirChoropleth+"SO2"]:
if not os.path.exists(path):
os.makedirs(path)
# Date loading and pre-processing
# if the pre processed file does not exist
if not os.path.isfile(datasetNameTrain):
# if preprocessed data does not exist pre process the raw data set
complete = pd.read_csv("../Dataset/pollution_us.csv", index_col = 0)
# slice dataset by half
training = complete.iloc[0:int(len(complete)/2), :]
testing = complete.iloc[int(len(complete)/2):, :]
# Impute the NA values
coAQITrain = training["CO AQI"]
coAQITrain.fillna(method='bfill', inplace=True)
so2AQITrain = training["SO2 AQI"]
so2AQITrain.fillna(method='ffill', inplace=True)
# Apply the imputed values
training["CO AQI"] = coAQITrain
training["SO2 AQI"] = so2AQITrain
# Convert to date time object add 2 new columns called Year and Month
year_months = []
years = []
dateLoc = training["Date Local"]
for date in dateLoc:
temp = dt.datetime.strptime(date, "%Y-%m-%d")
if (temp.month < 10):
month = "0" + str(temp.month)
else:
month = str(temp.month)
year_months.append(str(temp.year) + "-" + month)
years.append(str(temp.year))
training["Year Month"] = pd.Series(year_months, index = training.index)
training["Year"] = pd.Series(years, index = training.index)
training = training.drop_duplicates()
training.to_csv(datasetNameTrain, index = True, mode = "w")
# Impute the NA values
coAQITrain = testing["CO AQI"]
coAQITrain.fillna(method='bfill', inplace=True)
so2AQITrain = testing["SO2 AQI"]
so2AQITrain.fillna(method='ffill', inplace=True)
testing["CO AQI"] = coAQITrain
testing["SO2 AQI"] = so2AQITrain
dateLoc = testing["Date Local"]
# Convert to date time object add 2 new columns called Year and Month
year_months = []
years = []
for date in dateLoc:
temp = dt.datetime.strptime(date, "%Y-%m-%d")
if (temp.month < 10):
month = "0" + str(temp.month)
else:
month = str(temp.month)
year_months.append(str(temp.year) + "-" + month)
years.append(str(temp.year))
testing["Year Month"] = pd.Series(year_months, index = testing.index)
testing["Year"] = pd.Series(years, index = testing.index)
testing = testing.drop_duplicates()
testing.to_csv(datasetNameTest, index = True, mode = "w")
del testing
del training
#convert the date local to panda's date time object
training = pd.read_csv(datasetNameTrain, index_col = 0)
training["Date Local"] = pd.to_datetime(training["Date Local"], format = "%Y-%m-%d")
# re defining years
years = [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009]
# Exploratory analysis
'''
#get a better feel of the dataset
print(complete.drop_duplicates())
print(complete.shape[0] - complete.drop_duplicates().shape[0])
print(complete.isnull().sum())
print(complete.describe())
print(complete.head())
print(complete.tail())
print(complete.dtypes)
print(complete.shape)
#making sure that all observations are sorted by date
#in case if i subset it wrongly.
print((complete["Date Local"].sort - complete["Date Local"]).unique())
'''
# Exploratory plots
# Sum for each pollution plot
pollutant_list = []
# Get all attributes needed
for pollutant in pollutant_type:
# normalize data
if training[pollutant+" Units"].unique()[0] == "Parts per million":
temp = training[pollutant+" Mean"].sum() / 1000
else:
temp = training[pollutant+" Mean"].sum()
pollutant_list.extend([{"pollutant.type":pollutant, "pollutant.measurement":"billion", "pollutant.sum":temp}])
# Make it into a pandas df
pollutant_sum_df = pd.DataFrame(pollutant_list)
style.use('ggplot')
ax = pollutant_sum_df[["pollutant.type","pollutant.sum"]].plot(kind='bar', title ="Pollution summation", figsize=(15, 10), legend = False, fontsize=12)
ax.set_xlabel("Pollution Type", fontsize=12)
ax.set_ylabel("Billion", fontsize=12)
plt.title("Pollution summation")
plt.xticks(range(len(pollutant_sum_df)), pollutant_sum_df[["pollutant.type"]].values.flatten())
plt.savefig(outputDirExtras + "summation of each pollutant")
plt.show()
# Average pollution over the years plot
pollutant_list = []
for pollutant in pollutant_type:
subset = training[["Date Local", pollutant + " Mean"]]
for year in training["Year"].unique():
pollutant_list.extend([{"pollution.type":pollutant, "year":year, "pollution.mean":training[training["Date Local"].dt.year == year][pollutant+" Mean"].mean(), "pollutant.measurement":training[pollutant+" Units"].unique()[0]}])
pollutant_mean_df = pd.DataFrame(pollutant_list)
fig, ax = plt.subplots(1,1)
pollutant_mean_df.groupby("pollution.type").plot(x="year", y="pollution.mean", ax = ax)
ax.ticklabel_format(useOffset = False)
ax.legend(labels=['CO', 'NO2', 'O3', 'SO2'])
ax.set_ylabel("Billion")
ax.set_xlabel("Year")
plt.title("Pollution mean over years")
plt.savefig(outputDirExtras + "pollution mean")
plt.show()
# Normalized mean of each pollutant across the years plot
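# Each pollutant's yearly means are min-max scaled below, norm = (x - min) / (max - min), so the
# four pollutants can be compared on a common 0-1 scale before averaging them into a single curve.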
pollutant_list = []
pollutant_list2 = []
avg = np.zeros(len(years))
for pollutant in pollutant_type:
mean = np.array([])
for year in years:
mean = np.append(mean, training[training["Date Local"].dt.year == year][pollutant+" Mean"].mean())
maxVal = mean.max()
minVal = mean.min()
norm = np.array([])
for temp in mean:
norm = np.append(norm, (temp - minVal) / (maxVal - minVal))
avg = avg + norm
for idx in range(len(years)):
pollutant_list.extend([{"pollution.type":pollutant, "year":years[idx], "normalized.pollution":norm[idx]}])
pollutant_norm_df = pd.DataFrame(pollutant_list)
fig, ax = plt.subplots(1,1)
pollutant_norm_df.groupby("pollution.type").plot(x="year", y="normalized.pollution", ax = ax)
ax.ticklabel_format(useOffset = False)
ax.legend(labels=['CO', 'NO2', 'O3', 'SO2'], prop = {"size":7}, loc = "upper right")
ax.set_ylabel("Normalized")
ax.set_xlabel("Year")
plt.title("Pollution mean normalized over years")
plt.savefig(outputDirExtras + "normalize pollutant over the years")
plt.show()
plt.clf()
# Generalized pollution distribution across the years plot
avg = avg / len(pollutant_type)
for idx in range(len(years)):
pollutant_list2.extend([{"year":years[idx], "generalized.pollution":avg[idx]}])
pollutant_gen_df =
|
pd.DataFrame(pollutant_list2)
|
pandas.DataFrame
|
import os
assert os.environ['CONDA_DEFAULT_ENV']=='skbio_env', 'You should use the conda environment skbio_env'
import numpy as np
from skbio.stats.ordination import cca
import pandas as pd
import matplotlib.pylab as plt
from copy import copy
import matplotlib.colors as mcolors
import seaborn as sns
from matplotlib.patches import Patch
redFspecies = True
spl = [6,11,25,250]
tits = ['(a)', '(b)', '(c)', '(d)']
Allvars = False
noise = [True,False]
plott=True#False#
dirRead = '/Users/nooteboom/Documents/GitHub/cluster_TM/cluster_SP/density/dens/ordination/'
minss = [100,200, 300, 400, 500, 600, 700, 800, 900,1000] # The s_min values
xiss = np.arange(0.0001,0.01, 0.0001) # The xi values
fig, ax = plt.subplots(2,3, figsize=(16,16),
gridspec_kw={'width_ratios':[1,1,0.08]})
ax[0,0].get_shared_y_axes().join(ax[1,0])
ax[0,1].get_shared_y_axes().join(ax[1,1])
for axs in ax[:, 2]:
axs.remove()
gs = ax[1, 2].get_gridspec()
axbig = fig.add_subplot(gs[:, 2])
sns.set(style='whitegrid',context='paper', font_scale=2)
fs=20
vs = np.array([-1,1])*0.8
for spi, sp in enumerate(spl):
print(sp)
# keep track of the results
# F and D stand for Foram and Dino
# noise keeps track of CCA results if noisy locations are included
# cluster keeps track of results if noisy locations are excluded
FNoise = np.zeros((len(minss), len(xiss)))
DNoise = np.zeros((len(minss), len(xiss)))
FCluster = np.zeros((len(minss), len(xiss)))
DCluster = np.zeros((len(minss), len(xiss)))
for mini,mins in enumerate(minss):
print('min: %d'%(mins))
for xii, xis in enumerate(xiss):
opts = ["xi", xis]
if(redFspecies):
ff = np.load('loops/redF/prepredF_CCA_sp%d_smin%d%s_%.5f.npz'%(sp, mins, opts[0], opts[1]))
else:
ff = np.load(dirRead+'loops/prep_CCA_sp%d_smin%d%s_%.5f.npz'%(sp, mins, opts[0], opts[1]))
#%%
envs = ff['envnames']
if(Allvars):
envsplt = ff['envnames']
else:
envsplt = ff['envnames']
envsplt = ['temp','N']
Flabels = ff['Flabels']
Flabelsfull = copy(Flabels)
Fenv = ff['Fenv']
for ni,n in enumerate(noise):
envs = ff['envnames']
envsplt = ['temp','N']
Flabels = ff['Flabels']
Fenv = ff['Fenv']
Fenv_nn = ff['Fenv_nn']
#%% Foraminifera
data = ff['data']
sites = np.array(['site %d'%(i) for i in range(data.shape[0])])
species = np.array(['species %d'%(i) for i in range(data.shape[1])])
if(not n):
args = np.where(Flabels!=-1)
data = data[args]
Flabels = Flabels[args]
sites = sites[args]
Fenv = Fenv[args]
Fenv_nn = Fenv_nn[args]
X = pd.DataFrame(data, sites, species)
from bitshares import BitShares
from bitshares.instance import set_shared_bitshares_instance
from bitshares.market import Market
import pandas as pd
import time
def setup_bitshares_market(bts_symbol):
bitshares_instance = BitShares(
"wss://siliconvalley.us.api.bitshares.org/ws",
nobroadcast=True # <<--- set this to False when you want to fire!
)
set_shared_bitshares_instance(bitshares_instance)
bts_market = Market(
bts_symbol,
bitshares_instance=bitshares_instance
)
return bts_market
def get_bts_orderbook_df(ob, type, vol2: bool):
price_vol = list()
if vol2:
for i in range(len(ob[type])):
price = ob[type][i]['price']
invert_price = 1/price
vol = ob[type][i]['quote']
vol2 = ob[type][i]['base'] # is this the actual volume?
price_vol.append([price, vol['amount'], vol2['amount'], invert_price])
df = pd.DataFrame(price_vol)
df.columns = ['price', 'vol', 'vol_base', 'invert']
else:
for i in range(len(ob[type])):
price = ob[type][i]['price']
invert_price = 1/price
vol = ob[type][i]['quote']
price_vol.append([price, vol['amount'], invert_price])
df = pd.DataFrame(price_vol)
df.columns = ['price', 'vol', 'invert']
df['timestamp'] = int(time.time())
df['type'] = type
return df
def get_bts_ob_data(bts_market, depth: int):
vol2 = False
# get bitshares order book for current market
bts_orderbook = bts_market.orderbook(limit=depth)
ask_df = get_bts_orderbook_df(bts_orderbook, 'asks', vol2)
bid_df = get_bts_orderbook_df(bts_orderbook, 'bids', vol2)
bts_df = pd.concat([ask_df, bid_df])
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pdb
def import_robot_data(df_path):
df = pd.read_hdf(df_path)
#%%
from sklearn import datasets
import numpy
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
plt.style.use('ggplot')
#%%
# load iris dataset
iris = datasets.load_iris()
print(type(iris)) # it is a Bunch, which behaves like a dictionary
print(iris.keys())
# print(data['feature_names'])
print(type(iris['data']))
print(iris['data'].shape)
print(iris['target'])
#%%
# sns.pairplot
def species(row):
if row['target'] == 0:
return 'setosa'
if row['target'] == 1:
return iris['target_names'][1]
if row['target'] == 2:
return iris['target_names'][2]
sns.set(style="ticks", color_codes=True)
# create dataframe of sklearn datasets
df_iris = pd.DataFrame(iris['data'], columns=iris['feature_names'])
Y = iris['target']
df_iris['target'] = Y
df_iris['flower'] = df_iris.apply(species, axis=1)
sns.pairplot(df_iris, hue='flower')
plt.show()
# performing the countplot
plt.figure()
sns.countplot(x='target', hue='flower', data=df_iris)
plt.show()
#%%
# eda analysis
df_iris = pd.DataFrame(iris['data'], columns=iris['feature_names'])
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 12:29:19 2019
@author: sdenaro
"""
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
import numpy.matlib as matlib
import seaborn as sns
from sklearn import linear_model
#from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
def r2(x, y):
return stats.pearsonr(x, y)[0] ** 2
#Set Preference Customers reduction percent (number)
custom_redux=0
# Yearly firm loads (aMW)
# upload BPA firm load column from file
df_load=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=0,skiprows=[0,1], usecols=[9])
#Save as Preference Firm (PF), Industrial Firm (IP) and Export (ET)
PF_load_y=df_load.loc[[13]].values - custom_redux*df_load.loc[[13]].values
IP_load_y=df_load.loc[[3]].values - custom_redux* df_load.loc[[3]].values
ET_load_y=df_load.loc[[14]]
# Hourly hydro generation from FCRPS stochastic simulation
#df_hydro=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/PNW_hydro/FCRPS/BPA_owned_dams.csv', header=None)
df_hydro=pd.read_csv('new_BPA_hydro_daily.csv', usecols=([1]))
BPA_hydro=pd.DataFrame(data=df_hydro.loc[0:365*1200-1,:].sum(axis=1)/24, columns=['hydro'])
BPA_hydro[BPA_hydro>45000]=45000
#Remove CAISO bad_years
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365,1200), order='F'))
BPA_hydro.drop([82, 150, 374, 377, 540, 616, 928, 940, 974, 980, 1129, 1191],axis=1, inplace=True)
#reshuffle
#BPA_hydro[[1, 122, 364, 543]]=BPA_hydro[[16, 126, 368, 547]]
BPA_hydro=pd.DataFrame(np.reshape(BPA_hydro.values, (365*1188), order='F'))
# Yearly resources other than hydro (aMW)
df_resources=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=1,skiprows=[0,1], usecols=[9])
Nuc_y=df_resources.loc[[7]]
Wind_y=df_resources.loc[[8]]
Purch_y=df_resources.loc[[10]]
# Yearly costs and monthly rates (Oct-Sep)
costs_y=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=3,skiprows=[0,3,4,5], usecols=[8])*pow(10,3)
PF_rates=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=4,skiprows=np.arange(13,31), usecols=[0,7])
PF_rates.columns=['month','2018']
IP_rates=pd.read_excel('../DATA/net_rev_data.xlsx',sheet_name=5,skiprows=np.arange(13,31), usecols=[0,7])
IP_rates.columns=['month','2018']
#load BPAT hourly demand and wind and convert to daily
df_synth_load=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv', usecols=[1])
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Reads a MultiLayer network (HS, MM & DM) and extracts subgraphs based on parameters for the networkbrowser.
#
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from matplotlib import colors
from utils import get_network_layer, get_network_largest_connected_component, ensurePathExists
import argparse
#
from data_spermatocyte_pca_modules_dm import spermatocyte_pca_modules_dm
from data_spermatocyte_pca_modules_mm import spermatocyte_pca_modules_mm
from data_spermatocyte_pca_modules_hs import spermatocyte_pca_modules_hs
#
from data_enterocyte_pca_modules_dm import enterocyte_pca_modules_dm
from data_enterocyte_pca_modules_mm import enterocyte_pca_modules_mm
from data_enterocyte_pca_modules_hs import enterocyte_pca_modules_hs
cmap_meanfertrate = colors.LinearSegmentedColormap.from_list(name='cmap-mean-fert-rate', colors=['#d62728', '#1f77b4'], N=256)
def fert_rate_color(x):
if pd.isnull(x):
return '#FFFFFF' # white
else:
return colors.to_hex(cmap_meanfertrate(x)) # color
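# e.g. fert_rate_color(0.0) -> '#d62728' (red end of the scale), fert_rate_color(1.0) -> '#1f77b4',
# and missing values map to white.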
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=['spermatocyte', 'enterocyte'], help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument("--network", default='thr', type=str, help="Network to use. Defaults to 'thr'.")
parser.add_argument("--threshold", default=0.5, type=float, help="Threshold value. Defaults to 0.5.")
#
parser.add_argument("--add_modules", default=True, type=bool, help="Add PCA module information to the network.")
parser.add_argument("--add_conserved", default=True, type=bool, help="Add gene conservation information to the network.")
parser.add_argument("--add_core", default=True, type=bool, help="Add core gene information to the network.")
parser.add_argument("--add_backbone", default=True, type=bool, help="Add edge backbone to the network.")
parser.add_argument("--add_ortho_backbone", default=True, type=bool, help="Add edge ortho-backbone to the network.")
#
parser.add_argument("--add_mdlc_dge_results", default=True, type=bool, help="Add gene mdlc DGE results to the DM network.")
parser.add_argument("--add_splicing_defects", default=True, type=bool, help="Add gene mdlc splicing defects results to the DM network.")
#
parser.add_argument("--remove_isolates", default=True, type=bool, help="Remove isolate nodes from layers.")
parser.add_argument("--only_largest_component", default=True, type=bool, help="Only output the largest connected component.")
# parser.add_argument("--layer", default='DM', type=str, choices=['DM', 'MM', 'HS'], help="Network layer to compute SVD. Defaults to 'DM'.")
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
network = args.network
threshold = args.threshold
threshold_str = str(threshold).replace('.', 'p')
#
add_modules = args.add_modules
add_conserved = args.add_conserved
add_core = args.add_core
add_backbone = args.add_backbone
add_ortho_backbone = args.add_ortho_backbone
#
add_mdlc_dge_results = args.add_mdlc_dge_results
add_splicing_defects = args.add_splicing_defects
#
remove_isolates = args.remove_isolates
only_largest_component = args.only_largest_component
#
placeholder = {'HS': None, 'MM': None, 'DM': None}
data = {
'spermatocyte': {
'PCA': dict(placeholder),
'distance-angle': dict(placeholder),
'entropy': dict(placeholder),
'modules': {
'HS': spermatocyte_pca_modules_hs,
'MM': spermatocyte_pca_modules_mm,
'DM': spermatocyte_pca_modules_dm,
},
},
'enterocyte': {
'PCA': dict(placeholder),
'distance-angle': dict(placeholder),
'entropy': dict(placeholder),
'modules': {
'HS': enterocyte_pca_modules_hs,
'MM': enterocyte_pca_modules_mm,
'DM': enterocyte_pca_modules_dm,
}
}
}
#
print('Reading Network')
path_net = '../../04-network/results/network/{celltype:}/'.format(celltype=celltype)
if network == 'thr':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
G = nx.read_gpickle(rGfile_gpickle)
if add_conserved:
path_fpkm = '../../02-core_genes/results/FPKM/'
df_HS = pd.read_csv(path_fpkm + 'HS/HS-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
df_MM = pd.read_csv(path_fpkm + 'MM/MM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
df_DM = pd.read_csv(path_fpkm + 'DM/DM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
dict_string_gene_HS = df_HS['id_gene'].to_dict()
dict_string_gene_MM = df_MM['id_gene'].to_dict()
dict_string_gene_DM = df_DM['id_gene'].to_dict()
print('Loading {celltype:s} meta genes'.format(celltype=celltype))
path = '../../02-core_genes/results/'
dfM = pd.read_csv(path + 'meta-genes/meta-{celltype:s}-genes.csv.gz'.format(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
dfM['id_string_HS'] = dfM['id_string_HS'].apply(lambda x: x.split(',') if not pd.isnull(x)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy.io.votable import parse
from sh import bzip2
from ...lib.context_managers import cd
# =============================================================================
# CONSTANTS
# =============================================================================
PATH = os.path.abspath(os.path.dirname(__file__))
CATALOG_PATH = os.path.join(PATH, "carpyncho_catalog.pkl")
# =============================================================================
# BUILD
# =============================================================================
def get_ogle_3_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogleIII_all.csv.bz2")
df = pd.read_csv("ogleIII_all.csv")
ra = df["RA"].apply(
lambda d: d.replace(":", "h", 1).replace(":", "m", 1) + "s")
dec = df["Decl"].apply(
lambda d: d.replace(":", "d", 1).replace(":", "m", 1) + "s")
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = pd.Series(coords.dec.deg, index=df.index)
df["cls"] = df["Type"] + "-" + df["Subtype"]
df = df[["ID", "ra", "dec", "cls"]]
df["catalog"] = pd.Series("OGLE-3", index=df.index)
os.remove("ogleIII_all.csv")
return df
def get_ogle_4_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogle4.csv.bz2")
df = pd.read_csv("ogle4.csv")
def _ra(d):
d = d.replace(":", "h", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
ra = df["ra"].apply(_ra)
def _dec(d):
d = d.replace(":", "d", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
dec = df["dec"].apply(_dec)
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = pd.Series(coords.dec.deg, index=df.index)
df["ID"] = df["id"]
df = df[["ID", "ra", "dec", "cls"]]
df["catalog"] = pd.Series("OGLE-4", index=df.index)
# ~ os.remove("ogle4.csv")
return df
def get_vizier_resume():
with cd(PATH):
bzip2("-f", "-dk", "vizier_votable.vot.bz2")
votable = parse("vizier_votable.vot")
table = votable.get_first_table().to_table(use_names_over_ids=True)
df = table.to_pandas()
del votable, table
df = df[["OID", "RAJ2000", "DEJ2000", "Type"]].copy()
df.columns = "ID", "ra", "dec", "cls"
df["catalog"] = "vizier"
# keep only the RRLyrae
flt = 'RRAB', 'RRC', 'RRD'
df = df[df.cls.isin(flt)]
def change_type(t):
subpart = {
'RRAB': "RRab",
'RRC': "RRc",
'RRD': "RRd"
}[t]
return "RRLyr-" + subpart
df["cls"] = df.cls.apply(change_type)
# ~ os.remove("vizier_votable.vot")
return df
def build():
print("Bulding Vizier")
vizier = get_vizier_resume()
print("Building OGLE III")
# ~ ogle3 = get_ogle_3_resume()
print("Building OGLE IV")
ogle4 = get_ogle_4_resume()
print("Merging")
# ~ catalog = pd.concat((ogle3, ogle4, vizier), ignore_index=True)
print("Saving catalog")
# ~ catalog.to_pickle(CATALOG_PATH)
# =============================================================================
# LOAD
# =============================================================================
def load():
return pd.read_pickle(CATALOG_PATH)
# coding: utf-8
# In[1]:
#first commit -Richie
import pandas as pd
import numpy as np
# In[2]:
data_message = pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_message.tar.gz',compression='gzip')
data_lob = pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_LOB_2.tar.gz',compression='gzip')
# In[3]:
#drop redundant time
col_names=data_lob.columns
delete_list=[i for i in col_names if 'UPDATE_TIME' in i]
for i in delete_list:
data_lob=data_lob.drop(i,1)
# In[4]:
#functions for renaming
def rename(txt):
txt=txt[16:].split('..')[0]
index=0
ask_bid=''
p_v=''
if txt[-2].isdigit():
index=txt[-2:]
else:
index=txt[-1]
if txt[:3]=="BID":
ask_bid='bid'
else:
ask_bid='ask'
if txt[4:9]=="PRICE":
p_v='P'
else:
p_v='V'
return('_'.join([p_v,index,ask_bid]))
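# rename() drops a fixed 16-character prefix, keeps the part before the first '..', and rebuilds
# the name as <P or V>_<level>_<bid or ask>; e.g. a column whose stripped core is 'BID_PRICE1'
# becomes 'P_1_bid' and 'ASK_VOLUME10' becomes 'V_10_ask' (the raw prefixes here are hypothetical).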
# In[5]:
#rename columns
col_names=data_lob.columns
new_col_names=[]
new_col_names.append('index')
new_col_names.append('Time')
for i in col_names[2:]:
new_col_names.append(rename(i))
len(new_col_names)
data_lob.columns=new_col_names
# In[6]:
#feature: bid-ask spreads and mid price
for i in list(range(1, 11)):
bid_ask_col_name='_'.join(['spreads',str(i)])
p_i_ask='_'.join(['P',str(i),'ask'])
p_i_bid='_'.join(['P',str(i),'bid'])
data_lob[bid_ask_col_name]=data_lob[p_i_ask]-data_lob[p_i_bid]
mid_price_col_name = '_'.join(['mid_price',str(i)])
data_lob[mid_price_col_name]=(data_lob[p_i_ask]+data_lob[p_i_bid])/2
# In[7]:
#convert time
def timetransform(r):
# transform the time to millisecond, starting from 0
timestr = r
return (int(timestr[11:13]) - 9) * 60**2 + (int(timestr[14:16]) - 30) * 60 + float(timestr[17:])
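# e.g. timetransform('2012-05-22 09:30:01.5') -> 1.5, i.e. seconds elapsed since the 09:30 open
# (assuming the 'YYYY-MM-DD HH:MM:SS.f' timestamp layout of these files).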
time = list(data_lob['Time'])
time_new = [timetransform(i) for i in time]
data_lob["Time"] = time_new
# In[8]:
time = list(data_message['Time'])
time_new = [timetransform(i) for i in time]
data_message["Time"] = time_new
# In[9]:
#price difference
data_lob['P_diff_ask_10_1']=data_lob['P_10_ask']-data_lob['P_1_ask']
data_lob['P_diff_bid_1_10']=data_lob['P_1_bid']-data_lob['P_10_bid']
for i in list(range(1, 10)):
P_diff_ask_i_name='_'.join(['P','diff','ask',str(i),str(i+1)])
P_diff_bid_i_name='_'.join(['P','diff','bid',str(i),str(i+1)])
P_i_ask='_'.join(['P',str(i),'ask'])
P_i1_ask='_'.join(['P',str(i+1),'ask'])
P_i_bid='_'.join(['P',str(i),'bid'])
P_i1_bid='_'.join(['P',str(i+1),'bid'])
data_lob[P_diff_ask_i_name]=abs(data_lob[P_i1_ask]-data_lob[P_i_ask])
data_lob[P_diff_bid_i_name]=abs(data_lob[P_i1_bid]-data_lob[P_i_bid])
# In[10]:
#mean price and volumes
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
data_lob['Mean_ask_price']=0.0
data_lob['Mean_bid_price']=0.0
data_lob['Mean_ask_volumn']=0.0
data_lob['Mean_bid_volumn']=0.0
for i in list(range(0, 10)):
data_lob['Mean_ask_price']+=data_lob[p_ask_list[i]]
data_lob['Mean_bid_price']+=data_lob[p_bid_list[i]]
data_lob['Mean_ask_volumn']+=data_lob[v_ask_list[i]]
data_lob['Mean_bid_volumn']+=data_lob[v_bid_list[i]]
data_lob['Mean_ask_price']/=10
data_lob['Mean_bid_price']/=10
data_lob['Mean_ask_volumn']/=10
data_lob['Mean_bid_volumn']/=10
# In[11]:
#accumulated difference
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
data_lob['Accum_diff_price']=0.0
data_lob['Accum_diff_volumn']=0.0
for i in list(range(0, 10)):
data_lob['Accum_diff_price']+=data_lob[p_ask_list[i]]-data_lob[p_bid_list[i]]
data_lob['Accum_diff_volumn']+=data_lob[v_ask_list[i]]-data_lob[v_bid_list[i]]
data_lob['Accum_diff_price']/=10
data_lob['Accum_diff_volumn']/=10
# In[12]:
# #price and volumn derivatives
# p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
# p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
# v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
# v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
# #data_lob['Time_diff']=list(np.zeros(30)+1)+list(np.array(data_lob['Time'][30:])-np.array(data_lob['Time'][:-30]))
# for i in list(range(0, 10)):
# P_ask_i_deriv='_'.join(['P','ask',str(i+1),'deriv'])
# P_bid_i_deriv='_'.join(['P','bid',str(i+1),'deriv'])
# V_ask_i_deriv='_'.join(['V','ask',str(i+1),'deriv'])
# V_bid_i_deriv='_'.join(['V','bid',str(i+1),'deriv'])
# data_lob[P_ask_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[p_ask_list[i]][30:])-np.array(data_lob[p_ask_list[i]][:-30]))
# data_lob[P_bid_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[p_bid_list[i]][30:])-np.array(data_lob[p_bid_list[i]][:-30]))
# data_lob[V_ask_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[v_ask_list[i]][30:])-np.array(data_lob[v_ask_list[i]][:-30]))
# data_lob[V_bid_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[v_bid_list[i]][30:])-np.array(data_lob[v_bid_list[i]][:-30]))
# data_lob[P_ask_i_deriv]/=30
# data_lob[P_bid_i_deriv]/=30
# data_lob[V_ask_i_deriv]/=30
# data_lob[V_bid_i_deriv]/=30
#price and volume derivatives
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
#data_lob['Time_diff']=list(np.zeros(30)+1)+list(np.array(data_lob['Time'][30:])-np.array(data_lob['Time'][:-30]))
for i in list(range(0, 10)):
P_ask_i_deriv='_'.join(['P','ask',str(i+1),'deriv'])
P_bid_i_deriv='_'.join(['P','bid',str(i+1),'deriv'])
V_ask_i_deriv='_'.join(['V','ask',str(i+1),'deriv'])
V_bid_i_deriv='_'.join(['V','bid',str(i+1),'deriv'])
data_lob[P_ask_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[p_ask_list[i]][1000:])-np.array(data_lob[p_ask_list[i]][:-1000]))
data_lob[P_bid_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[p_bid_list[i]][1000:])-np.array(data_lob[p_bid_list[i]][:-1000]))
data_lob[V_ask_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[v_ask_list[i]][1000:])-np.array(data_lob[v_ask_list[i]][:-1000]))
data_lob[V_bid_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[v_bid_list[i]][1000:])-np.array(data_lob[v_bid_list[i]][:-1000]))
data_lob[P_ask_i_deriv]/=1000
data_lob[P_bid_i_deriv]/=1000
data_lob[V_ask_i_deriv]/=1000
data_lob[V_bid_i_deriv]/=1000
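# Each *_deriv column is the change over the previous 1000 book updates divided by 1000,
# a crude discrete derivative of price/volume at each level (zeros pad the first 1000 rows).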
# In[ ]:
#set labels
diff=data_lob['mid_price_1']
diff_30=np.array(diff[30:])-np.array(diff[:-30])
label=[]
for i in diff_30:
if i>0.01:
label.append('1')
elif i<(-0.01):
label.append('-1')
else:
label.append('0')
data_lob['labels']=label+list(np.zeros(30))
# In[13]:
#set spread crossing labels
p_now_bid = list(data_lob['P_1_bid'][:-1000])
p_now_ask = list(data_lob['P_1_ask'][:-1000])
p_next_bid=list(data_lob['P_1_bid'][1000:])
p_next_ask=list(data_lob['P_1_ask'][1000:])
label_SC=[]
for i in range(len(p_now_bid)):
if p_next_bid[i]>=p_now_ask[i]:
label_SC.append('+1')
elif p_next_ask[i]<=p_now_bid[i]:
label_SC.append('-1')
else:
label_SC.append('0')
data_lob['labels']=label_SC+list(np.zeros(1000))
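# Spread-crossing labels: '+1' if the best bid 1000 updates ahead reaches the current best ask
# (upward crossing), '-1' if the future best ask falls to the current best bid, '0' otherwise.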
# In[14]:
#drop first and last 1000 rows (the lag used above for the labels)
data_out=data_lob[:-1000]
data_out=data_out[1000:]
# data_out=data_lob[:-30]
# data_out=data_out[30:]
# In[15]:
# split train and test
# split2=203350-30
# split1=int(split2*0.5)
# split_test=split1+int(split2*0.25)
# split3=279656-30
# shuttle=data_out[:split2].reindex(np.random.permutation(data_out[:split2].index))
# train=shuttle[:split1]
# test=shuttle[split1:split_test]
# train_test=shuttle[:split_test]
# validation=shuttle[split_test:]
# strategy_validation=data_out[split2:split3]
# split2=203350-30
# split1=int(split2*0.5)
# split_test=split1+int(split2*0.25)
# split3=279656-30
# train=data_out[:split1]
# test=data_out[split1:split_test]
# train_test=data_out[:split_test]
# validation=data_out[split_test:split2]
# strategy_validation=data_out[split2:split3]
#split for SC
# split2=203349-1000
# split1=int(split2*0.5)
# split_test=split1+int(split2*0.25)
# split3=279656-1000
# train=data_out[:split1]
# test=data_out[split1:split_test]
# train_test=data_out[:split_test]
# validation=data_out[split_test:split2]
# strategy_validation=data_out[split2:split3]
#split shuttle for SC
split2=203349-1000
split1=int(split2*0.5)
split_test=split1+int(split2*0.25)
split3=279656-1000
shuttle=data_out[:split2].reindex(np.random.permutation(data_out[:split2].index))
train=shuttle[:split1]
test=shuttle[split1:split_test]
train_test=shuttle[:split_test]
validation=shuttle[split_test:]
strategy_validation=data_out[split2:split3]
# In[17]:
train_sample=train.sample(int(0.1*len(train)))
test_sample=test.sample(int(0.1*len(test)))
train_test_sample=train_test.sample(int(0.1*len(train_test)))
validation_sample=validation.sample(int(0.1*len(validation)))
strategy_validation_sample=strategy_validation.sample(int(0.1*len(strategy_validation)))
# In[22]:
label_percent(np.array(train_test_sample['labels']).astype('int'))
# In[ ]:
data_out[data_out['Time']>(5400+3600)]
# In[ ]:
diff=data_lob['mid_price_1']
diff_10=np.array(diff[10:])-np.array(diff[:-10])
diff_20=np.array(diff[20:])-np.array(diff[:-20])
diff_30=np.array(diff[30:])-np.array(diff[:-30])
diff_40=np.array(diff[40:])-np.array(diff[:-40])
print('Delta=10:')
label_percent(diff_10)
print('Delta=20:')
label_percent(diff_20)
print('Delta=30:')
label_percent(diff_30)
print('Delta=40:')
label_percent(diff_40)
# Delta=10:
# Pos:0.31; Neg:0.33; Zero:0.36;
# Delta=20:
# Pos:0.39; Neg:0.42; Zero:0.19;
# Delta=30:
# Pos:0.42; Neg:0.46; Zero:0.11;
# Delta=40:
# Pos:0.44; Neg:0.48; Zero:0.07;
# In[19]:
def label_percent(v):
l=len(v)
pos=sum(x > 0 for x in v)/l
neg=sum(x < 0 for x in v)/l
zero=sum(x == 0 for x in v)/l
print('Pos:'+str("{0:.2f}".format(pos))+"; Neg:"+str("{0:.2f}".format(neg))+'; Zero:'+str("{0:.2f}".format(zero))+';')
# In[24]:
# train.to_csv('../../data/output/model_clean_data/train.csv')
# test.to_csv('../../data/output/model_clean_data/test.csv')
# validation.to_csv('../../data/output/model_clean_data/validation.csv')
# train_test.to_csv('../../data/output/model_clean_data/train_test.csv')
# strategy_validation.to_csv('../../data/output/model_clean_data/strategy_validation.csv')
train_sample.to_csv('../../data/output/model_clean_data/train.csv')
test_sample.to_csv('../../data/output/model_clean_data/test.csv')
validation_sample.to_csv('../../data/output/model_clean_data/validation.csv')
train_test_sample.to_csv('../../data/output/model_clean_data/train_test.csv')
strategy_validation_sample.to_csv('../../data/output/model_clean_data/strategy_validation.csv')
# In[ ]:
data_out[:1000].to_csv('/Users/Richie/Desktop/sample.csv')
# In[64]:
#combine train test validation
data_train_test1 = pd.read_csv('../../data/output/model_clean_data/SC_shuffle/train_test.tar.gz',compression='gzip')
data_validation1 = pd.read_csv('../../data/output/model_clean_data/SC_shuffle/validation.tar.gz',compression='gzip')
from pandas.testing import assert_frame_equal
import pandas as pd
from sparkmagic.utils.utils import coerce_pandas_df_to_numeric_datetime
def test_no_coercing():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': u'random', u'temp_diff': u'0adsf'}]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
notna,
)
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(series):
A = series
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(frame):
result = frame.expanding().cov()
rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(frame):
result = frame.expanding().corr()
rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_func(func, static_comp, frame_or_series):
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = getattr(data.expanding(min_periods=1, axis=0), func)()
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[10], static_comp(data[:11]))
else:
tm.assert_series_equal(
result.iloc[10], static_comp(data[:11]), check_names=False
)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_min_periods(func, static_comp):
ser = Series(np.random.randn(50))
result = getattr(ser.expanding(min_periods=30, axis=0), func)()
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = getattr(ser.expanding(min_periods=15, axis=0), func)()
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result0, result1)
result = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_expanding_apply(engine_and_raw, frame_or_series):
engine, raw = engine_and_raw
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = data.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[9], np.mean(data[:11]))
else:
tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
def test_expanding_min_periods_apply(engine_and_raw):
engine, raw = engine_and_raw
ser = Series(np.random.randn(50))
result = ser.expanding(min_periods=30).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
# min_periods is working correctly
result = ser.expanding(min_periods=15).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = ser2.expanding(min_periods=5).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = ser.expanding(min_periods=0).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
result1 = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result0, result1)
result = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_std(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.expanding(min_periods=min_periods).corr(x)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
std_y = x.expanding(min_periods=min_periods).std(ddof=ddof)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if ddof == 0:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.expanding(min_periods=min_periods).mean()
mean_y = x.expanding(min_periods=min_periods).mean()
mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
result = x.expanding(min_periods=min_periods).mean()
expected = (
x.expanding(min_periods=min_periods).sum()
/ x.expanding(min_periods=min_periods).count()
)
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding().count()
mean_x = x.expanding(min_periods=min_periods).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.expanding(min_periods=min_periods).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
# check variance debiasing factors
var_unbiased_x = x.expanding(min_periods=min_periods).var()
var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0)
var_debiasing_factors_x = x.expanding().count() / (
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_apply_empty_series(engine_and_raw):
engine, raw = engine_and_raw
ser = Series([], dtype=np.float64)
tm.assert_series_equal(
ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
)
def test_expanding_apply_min_periods_0(engine_and_raw):
# GH 8080
engine, raw = engine_and_raw
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
expected = Series([1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
import logging
from fastapi import APIRouter
import pandas as pd
from pydantic import BaseModel, Field, validator
import os
from dotenv import load_dotenv
from app.spotify_client import SpotifyAPI
load_dotenv()
client_id = os.getenv("SPOTIPY_CLIENT_ID")
client_secret = os.getenv("SPOTIPY_CLIENT_SECRET", default="<PASSWORD> secret")
log = logging.getLogger(__name__)
router = APIRouter()
spotify = SpotifyAPI(client_id, client_secret)
@router.post('/predictfav')
async def predictfav(search_term: str, type_of_search: str):
"""(CURRENTLY IN TEST MODE) Make song predictions from favorite songs
and return Song ID's in an array"""
songs = spotify.search(search_term, search_type=type_of_search)
return songs
# parse JSON from mood
class Itemmood(BaseModel):
"""Use this data model to send the request body correctly,
so data is received back (in JSON) based on the moods selected."""
moods: list=Field(..., example=[{"mood": "Danceability", "value": "high"},
{"mood": "Energy", "value": "medium"},
{"mood": "Speechiness", "value": "medium"},
{"mood": "Acousticness", "value": "low"}])
def to_df(self):
"""Convert pydantic object to pandas dataframe with 1 row."""
return pd.DataFrame(self)
"""Argument parser for obtaining WR distances"""
import argparse
import wrdists.collated_functions as cf
import pandas as pd
def main():
"""Incorporate into function to be run using the command line when installed."""
parser = argparse.ArgumentParser()
"""Set Pandas dataframes to show all elements"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
"""Necessary arguments to calculate distances"""
parser.add_argument('-p', help='Gaia parallax (mas) when in single mode (float). Column number containing parallax \
list when in file mode (int).', action='store', dest='par', type=float, required=True)
parser.add_argument('-pe', help='Gaia parallax error (mas) when in single mode (float). Column number containing \
parallax error list when in file mode (int).', action='store', dest='parerr',
type=float, required=True)
parser.add_argument('-g', help='Gaia G band magnitude when in single mode (float). Column number containing G mag list \
when in file mode (int).', action='store', dest='g', type=float, required=True)
parser.add_argument('-ra', help='Gaia right ascension (RA) when in single mode (float). Column number containing RA \
list when in file mode (int).', action='store', dest='ra', type=float, required=True)
parser.add_argument('-dec', help='Gaia declination (DEC) when in single mode (float). Column number containing DEC \
list when in file mode (int).', action='store', dest='dec', type=float, required=True)
parser.add_argument('-ast', help='Gaia astrometric excess noise when in single mode (float). Column number containing \
excess noise list when in file mode (int).', action='store', dest='ast', type=float,
required=True)
parser.add_argument('-n', help='Star name or identifier (e.g WR1) when in single mode (str). Column number containing \
list of names when in file mode (int).', action='store', dest='name', type=str,
required=True)
"""Optional arguments"""
# Load in a list of results:
parser.add_argument('-fin', help='File path from which to load a file containing parameters when executing for \
lists of stars (str).', action='store', dest='list_mode_fin', type=str, default=False)
parser.add_argument('-fout', help='File path to store output to when executing with a file input (str).',
action='store', dest='list_mode_fout', type=str, default=False)
parser.add_argument('-ph', help='Preserve the header if the file input contains one (no argument)',
action='store_true', dest='header', default=False)
parser.add_argument('-dmt', help='Specify a delimiter for the input file (str).', action='store', dest='delimit', type=str,
default=',')
parser.add_argument('-zpt_list', help='Specify the column number containing the zero points (if used) (int).', action='store', dest='zpt_list', type=int,
default=False)
# Other options:
parser.add_argument('-zpt', help='Set the zero point of the parallaxes (mas) to an alternative value \
(default = -0.029 mas) (float).', action='store', default=-0.029, type=float, dest='zpt')
parser.add_argument('-md','--minimum_dist', help='Set the minimum distance of the prior (pc), which is useful for \
constraining the prior (float).', action='store', default=300, type=float,
dest='md')
parser.add_argument('-es','--error_sigma', help='Set the credible interval coverage range (float).', action='store',
default=0.68, type=float, dest='esig')
# Save plots and/or posterior distribution:
parser.add_argument('-pt', '--plot', help='Plot the output distributions of the prior, likelihood and posterior, \
along with the credible intervals (uncertainty bounds) and most likely \
distance (default = False). The input string should be the path to save \
the plotted image(s) (str).', action='store', default=False, type=str,
dest='plot_data')
parser.add_argument('-dist', '--distribution', help='Saves the posterior distance distribution as a csv '
'which can be loaded and used in another python program. The '
'input string should be the path to save the distribution data (str).',
action='store', default=False, type=str, dest='save_distribution')
# Exclude dust distribution or parallax resizing:
parser.add_argument('-ed','--exclude_dust', help='Exclude dust from the prior (use HII regions only), which may be \
useful to compare the effects of different priors (default = False) (no argument).',
action='store_true', default=False, dest='dust_exc')
parser.add_argument('-ee','--exclude_err', help='Exclude resizing of parallax errors (compared to external catalogues, \
Arenou et al. 2018) and zero point correction. May be useful for data \
comparison or application to non Gaia parallaxes (e.g Hipparcos) \
(default = False) (no argument)', action='store_true', default=False,
dest='err_exc')
"""Run the code to get distances"""
args = parser.parse_args()
if args.list_mode_fin:
args.par = int(args.par)
args.parerr = int(args.parerr)
args.g = int(args.g)
args.ra = int(args.ra)
args.dec = int(args.dec)
args.ast = int(args.ast)
args.name = int(args.name)
# Convert params to integer types.
if args.header:
data = pd.read_csv(args.list_mode_fin, delimiter=args.delimit)
else:
data = pd.read_csv(args.list_mode_fin, header=None, delimiter=args.delimit)
# Load in the file.
pars = data.iloc[:, args.par].values
parserrs = data.iloc[:, args.parerr].values
phots = data.iloc[:, args.g].values
ras = data.iloc[:, args.ra].values
decs = data.iloc[:, args.dec].values
asts = data.iloc[:, args.ast].values
names = data.iloc[:, args.name].values
# Slice out columns with parameters (according to the column number).
if args.zpt_list:
print(args.zpt_list)
# If zero point list is specified, load zero point from csv file:
zpt_data = data.iloc[:, args.zpt_list].values
else:
# use single zero point for all WR stars.
zpt_data = args.zpt
maxr, upper, lower, heights, heights_upper, heights_lower, omega, omega_err, flags = cf.run_dist(pars, parserrs,
phots, ras, decs, asts, names,
wdust= not args.dust_exc,
werr= not args.err_exc,
md=args.md, zpt_data=zpt_data,
err_sig=args.esig,
plot_image=args.plot_data,
save_distributions=args.save_distribution)
# Calculate the distances for all data points in the list.
data_dict = {'Distance (pc)':maxr, 'Upper distance (pc)':upper,
'Lower distance (pc)':lower,
'Flags for distance':flags,
'Distance from plane (|z|) (pc)':heights,
'|z| upper bound (pc)':heights_upper,
'|z| lower bound (pc)':heights_lower,
'Omega (zero point corrected parallax) (mas)':omega}
if not args.err_exc:
data_dict.update({'Sigma omega (increased error) (mas)':omega_err})
# If using expanded uncertainty estimates (i.e getting results for DR2 data), this will include the increased
# errors in the results file.
if args.zpt_list:
data_dict.update({'Zero points applied (mas)':zpt_data})
# If loading in a list of individual zero points from the original data file, include a list of the zero points used in the results file.
df = pd.DataFrame(data=data_dict, index=names)
"""
Test for the normalization operation
"""
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import pyproj
import xarray as xr
from jdcal import gcal2jd
from numpy.testing import assert_array_almost_equal
from xcube.core.gridmapping import GridMapping
from xcube.core.new import new_cube
from xcube.core.normalize import DatasetIsNotACubeError
from xcube.core.normalize import adjust_spatial_attrs
from xcube.core.normalize import decode_cube
from xcube.core.normalize import encode_cube
from xcube.core.normalize import normalize_coord_vars
from xcube.core.normalize import normalize_dataset
from xcube.core.normalize import normalize_missing_time
# noinspection PyPep8Naming
def assertDatasetEqual(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it
# checks each aspect of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
class DecodeCubeTest(TestCase):
def test_cube_stays_cube(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIs(dataset, cube)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertTrue(grid_mapping.crs.is_geographic)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual(set(), set(rest.data_vars))
def test_no_cube_vars_are_dropped(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIsInstance(cube, xr.Dataset)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertEqual({'a', 'b', 'c'}, set(cube.data_vars))
self.assertEqual(pyproj.CRS.from_string('CRS84'), grid_mapping.crs)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual({'d', 'crs'}, set(rest.data_vars))
def test_encode_is_inverse(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3),
x_name='x', y_name='y')
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
cube, grid_mapping, rest = decode_cube(dataset)
dataset2 = encode_cube(cube, grid_mapping, rest)
self.assertEqual(set(dataset.data_vars), set(dataset2.data_vars))
self.assertIn('crs', dataset2.data_vars)
def test_no_cube_vars_found(self):
dataset = new_cube()
self.assertEqual(set(), set(dataset.data_vars))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_non_empty=True)
self.assertEqual("No variables found with dimensions"
" ('time', [...] 'lat', 'lon')"
" or dimension sizes too small",
f'{cm.exception}')
def test_no_grid_mapping(self):
dataset = xr.Dataset(dict(a=[1, 2, 3], b=0.5))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset)
self.assertEqual("Failed to detect grid mapping:"
" cannot find any grid mapping in dataset",
f'{cm.exception}')
def test_grid_mapping_not_geographic(self):
dataset = new_cube(x_name='x', y_name='y',
variables=dict(a=0.5), crs='epsg:25832')
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_geographic=True)
self.assertEqual("Grid mapping must use geographic CRS,"
" but was 'ETRS89 / UTM zone 32N'",
f'{cm.exception}')
class EncodeCubeTest(TestCase):
def test_geographical_crs(self):
cube = new_cube(variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube, gm)
self.assertIs(cube, dataset)
dataset = encode_cube(cube, gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd'}, set(dataset.data_vars))
def test_non_geographical_crs(self):
cube = new_cube(x_name='x',
y_name='y',
crs='epsg:25832',
variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube,
gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
class TestNormalize(TestCase):
def test_normalize_zonal_lat_lon(self):
resolution = 10
lat_size = 3
lat_coords = np.arange(0, 30, resolution)
lon_coords = [i + 5. for i in np.arange(-180.0, 180.0, resolution)]
lon_size = len(lon_coords)
one_more_dim_size = 2
one_more_dim_coords = np.random.random(2)
var_values_1_1d = xr.DataArray(np.random.random(lat_size),
coords=[('latitude_centers', lat_coords)],
dims=['latitude_centers'],
attrs=dict(chunk_sizes=[lat_size],
dimensions=['latitude_centers']))
var_values_1_1d.encoding = {'chunks': (lat_size,)}
var_values_1_2d = xr.DataArray(np.array([var_values_1_1d.values for _ in lon_coords]).T,
coords={'lat': lat_coords, 'lon': lon_coords},
dims=['lat', 'lon'],
attrs=dict(chunk_sizes=[lat_size, lon_size],
dimensions=['lat', 'lon']))
var_values_1_2d.encoding = {'chunks': (lat_size, lon_size)}
var_values_2_2d = xr.DataArray(np.random.random(lat_size * one_more_dim_size).
reshape(lat_size, one_more_dim_size),
coords={'latitude_centers': lat_coords,
'one_more_dim': one_more_dim_coords},
dims=['latitude_centers', 'one_more_dim'],
attrs=dict(chunk_sizes=[lat_size, one_more_dim_size],
dimensions=['latitude_centers', 'one_more_dim']))
var_values_2_2d.encoding = {'chunks': (lat_size, one_more_dim_size)}
var_values_2_3d = xr.DataArray(np.array([var_values_2_2d.values for _ in lon_coords]).T,
coords={'one_more_dim': one_more_dim_coords,
'lat': lat_coords,
'lon': lon_coords, },
dims=['one_more_dim', 'lat', 'lon', ],
attrs=dict(chunk_sizes=[one_more_dim_size,
lat_size,
lon_size],
dimensions=['one_more_dim', 'lat', 'lon']))
var_values_2_3d.encoding = {'chunks': (one_more_dim_size, lat_size, lon_size)}
dataset = xr.Dataset({'first': var_values_1_1d, 'second': var_values_2_2d})
expected = xr.Dataset({'first': var_values_1_2d, 'second': var_values_2_3d})
expected = expected.assign_coords(
lon_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lon.values],
dims=['lon', 'bnds']))
expected = expected.assign_coords(
lat_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lat.values],
dims=['lat', 'bnds']))
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
self.assertEqual(actual.first.chunk_sizes, expected.first.chunk_sizes)
self.assertEqual(actual.second.chunk_sizes, expected.second.chunk_sizes)
def test_normalize_lon_lat_2d(self):
"""
Test nominal execution
"""
dims = ('time', 'y', 'x')
attrs = {'valid_min': 0., 'valid_max': 1.}
t_size = 2
y_size = 3
x_size = 4
a_data = np.random.random_sample((t_size, y_size, x_size))
b_data = np.random.random_sample((t_size, y_size, x_size))
time_data = [1, 2]
lat_data = [[10., 10., 10., 10.],
[20., 20., 20., 20.],
[30., 30., 30., 30.]]
lon_data = [[-10., 0., 10., 20.],
[-10., 0., 10., 20.],
[-10., 0., 10., 20.]]
dataset = xr.Dataset({'a': (dims, a_data, attrs),
'b': (dims, b_data, attrs)
},
{'time': (('time',), time_data),
'lat': (('y', 'x'), lat_data),
'lon': (('y', 'x'), lon_data)
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.
}
)
new_dims = ('time', 'lat', 'lon')
expected = xr.Dataset({'a': (new_dims, a_data, attrs),
'b': (new_dims, b_data, attrs)},
{'time': (('time',), time_data),
'lat': (('lat',), [10., 20., 30.]),
'lon': (('lon',), [-10., 0., 10., 20.]),
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.})
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
def test_normalize_lon_lat(self):
"""
Test nominal execution
"""
dataset = xr.Dataset({'first': (['latitude',
'longitude'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['lat', 'long'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['latitude',
'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
def test_normalize_does_not_reorder_increasing_lat(self):
first = np.zeros([3, 45, 90])
first[0, :, :] = np.eye(45, 90)
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], first),
'second': (['time', 'lat', 'lon'], np.zeros([3, 45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 4)]}).chunk(
chunks={'time': 1})
actual = normalize_dataset(ds)
xr.testing.assert_equal(actual, ds)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
def test_normalize_with_time_called_t(self):
ds = xr.Dataset({'first': (['time', 'lat', 'lon'], np.zeros([4, 90, 180])),
'second': (['time', 'lat', 'lon'], np.zeros([4, 90, 180])),
't': ('time', np.array(['2005-07-02T00:00:00.000000000',
'2006-07-02T12:00:00.000000000',
'2007-07-03T00:00:00.000000000',
'2008-07-02T00:00:00.000000000'], dtype='datetime64[ns]'))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '2005-01-17',
'time_coverage_end': '2008-08-17'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 3)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (4, 90, 180))
self.assertEqual(norm_ds.second.shape, (4, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2005-07-02T00:00')))
def test_normalize_julian_day(self):
"""
Test Julian Day -> Datetime conversion
"""
tuples = [gcal2jd(2000, x, 1) for x in range(1, 13)]
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([88, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([88, 90, 12])),
'lat': np.linspace(-88, 45, 88),
'lon': np.linspace(-178, 178, 90),
'time': [x[0] + x[1] for x in tuples]})
ds.time.attrs['long_name'] = 'time in julian days'
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([12, 88, 90])),
'second': (['time', 'lat', 'lon'], np.zeros([12, 88, 90])),
'lat': np.linspace(-88, 45, 88),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
expected.time.attrs['long_name'] = 'time'
actual = normalize_dataset(ds)
assertDatasetEqual(actual, expected)
class AdjustSpatialTest(TestCase):
def test_nominal(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_nominal_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values - 2
lat_bnds[:, 1] = ds.lat.values + 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values + 2
lat_bnds[:, 1] = ds.lat.values - 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0 -42.0, -20.0 -42.0))')
def test_once_cell_with_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'lat_bnds': (['lat', 'bnds'], np.array([[52.4, 52.6]])),
'lon_bnds': (['lon', 'bnds'], np.array([[11.4, 11.6]])),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_min'], 52.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_max'], 52.6)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertAlmostEqual(ds1.attrs['geospatial_lon_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_min'], 11.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_max'], 11.6)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((11.4 52.4, 11.4 52.6, 11.6 52.6, 11.6 52.4, 11.4 52.4))')
def test_once_cell_without_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds2 = adjust_spatial_attrs(ds)
# Datasets should be the same --> not modified
self.assertIs(ds2, ds)
class NormalizeCoordVarsTest(TestCase):
def test_ds_with_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_potential_coords_and_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2])),
'lat': (['lat'], np.linspace(-89.5, 89.5, 90)),
'lon': (['lon'], np.linspace(-179.5, 179.5, 180))})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_no_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_coord_vars(ds)
self.assertIs(ds, new_ds)
class NormalizeMissingTimeTest(TestCase):
def test_ds_without_time(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
self.assertEqual(new_ds.coords['time_bnds'].attrs.get('long_name'), 'time')
def test_ds_without_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 3)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertNotIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), None)
def test_ds_without_time_attrs(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_missing_time(ds)
self.assertIs(ds, new_ds)
def test_ds_with_cftime(self):
time_data = xr.cftime_range(start='2010-01-01T00:00:00',
periods=6,
freq='D',
calendar='gregorian').values
ds = xr.Dataset({'first': (['time', 'lat', 'lon'], np.zeros([6, 90, 180])),
'second': (['time', 'lat', 'lon'], np.zeros([6, 90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180),
'time': time_data},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
new_ds = normalize_missing_time(ds)
self.assertIs(ds, new_ds)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
def test_normalize_with_missing_time_dim_from_filename(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
)
ds_encoding = dict(source='20150204_etfgz_20170309_dtsrgth')
ds.encoding.update(ds_encoding)
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2016-02-21T00:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2015-02-04')))
        self.assertEqual(norm_ds.coords['time_bnds'][0][1],
                         xr.DataArray(pd.to_datetime('2017-03-09')))
import functools
import inspect
import os
from functools import singledispatch
from typing import Callable, Collection, Iterable, List, Union
import joblib
import numpy as np
import pandas as pd
import requests
from IPython.display import display
from numpy import ndarray
from pandas.api.types import (
is_categorical_dtype,
is_float,
is_hashable,
is_integer,
is_list_like,
)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tqdm.notebook import tqdm
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import check_consistent_length, compute_sample_weight, deprecated
from ndg_tools._validation import _check_1d
from ndg_tools.typing import FrameOrSeries, ArrayLike
from fuzzywuzzy.process import dedupe, extractOne
from fuzzywuzzy import fuzz
def get_columns(data: DataFrame, subset: Union[str, Iterable[str]]):
if subset is None:
pass
elif isinstance(subset, str):
data = data.loc[:, [subset]].copy()
elif isinstance(subset, Iterable):
data = data.loc[:, list(subset)].copy()
else:
raise TypeError(
f"Expected str or iterable of str, got {type(subset).__name__}."
)
return data
def numeric_cols(data: pd.DataFrame) -> list:
"""Returns a list of all numeric column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the numeric column names.
"""
return data.select_dtypes("number").columns.to_list()
def true_numeric_cols(data: pd.DataFrame, min_unique=3) -> list:
"""Returns numeric columns with at least `min_unique` unique values.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
Numeric column names.
"""
num = data.select_dtypes("number")
return num.columns[min_unique <= num.nunique()].to_list()
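# Illustrative check (hypothetical frame, not from the original module): a 0/1
# indicator column is excluded because it has fewer than `min_unique` values.
#
#   df = pd.DataFrame({"price": [9.5, 3.2, 7.1, 4.4], "on_sale": [0, 1, 0, 1]})
#   true_numeric_cols(df)   # -> ['price']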
def hashable_cols(data: pd.DataFrame) -> list:
valid_idx = data.apply(lambda x: x.first_valid_index() or x.index[0])
test_row = data.loc[valid_idx].fillna(method="bfill").iloc[0]
hashable = data.columns[test_row.map(is_hashable)]
return hashable.to_list()
def cat_cols(data: pd.DataFrame, min_cats: int = None, max_cats: int = None) -> list:
"""Returns a list of categorical column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
min_cats : int, optional
Minimum number of categories, by default None.
max_cats : int, optional
Maximum number of categories, by default None.
Returns
-------
list
Categorical column names.
"""
cats = data.select_dtypes("category")
cat_counts = cats.nunique()
if min_cats is None:
min_cats = cat_counts.min()
if max_cats is None:
max_cats = cat_counts.max()
keep = (min_cats <= cat_counts) & (cat_counts <= max_cats)
return cats.columns[keep].to_list()
def multicat_cols(data: pd.DataFrame) -> list:
"""Returns column names of categoricals with 3+ categories.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
Categorical (3+) column names.
"""
cats = data.select_dtypes("category")
return cats.columns[3 <= cats.nunique()].to_list()
def noncat_cols(data: pd.DataFrame) -> list:
"""Returns a list of all non-categorical column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the non-categorical column names.
"""
return data.columns.drop(cat_cols(data)).to_list()
def binary_cols(data: pd.DataFrame) -> list:
"""Returns a list of columns with exactly 2 unique values.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the binary column names.
"""
return data.columns[data.nunique() == 2].to_list()
def get_defaults(func: Callable) -> dict:
"""Returns dict of parameters with their default values, if any.
Parameters
----------
func : Callable
Callable to look up parameters for.
Returns
-------
dict
Parameters with default values, if any.
Raises
------
TypeError
`callable` must be Callable.
"""
if not isinstance(func, Callable):
raise TypeError(f"`callable` must be Callable, not {type(func)}")
params = pd.Series(inspect.signature(func).parameters)
defaults = params.map(lambda x: x.default)
return defaults.to_dict()
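# Illustrative sketch (hypothetical function, not part of this module):
# parameters without a default map to `inspect.Parameter.empty`.
#
#   def resample(data, rule="1D", closed=None): ...
#   get_defaults(resample)
#   # -> {'data': inspect.Parameter.empty, 'rule': '1D', 'closed': None}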
def get_param_names(func: Callable, include_self=False) -> list:
"""Returns list of parameter names.
Parameters
----------
func : Callable
Callable to look up parameter names for.
Returns
-------
list
List of parameter names.
"""
params = list(inspect.signature(func).parameters.keys())
if "self" in params:
params.remove("self")
return params
def pandas_heatmap(
frame: pd.DataFrame,
subset=None,
na_rep="",
precision=3,
cmap="vlag",
low=0,
high=0,
vmin=None,
vmax=None,
axis=None,
):
"""Style DataFrame as a heatmap."""
table = frame.style.background_gradient(
subset=subset, cmap=cmap, low=low, high=high, vmin=vmin, vmax=vmax, axis=axis
)
table.set_na_rep(na_rep)
table.set_precision(precision)
return table
def filter_pipe(
data: FrameOrSeries,
like: List[str] = None,
regex: List[str] = None,
axis: int = None,
) -> FrameOrSeries:
"""Subset the DataFrame or Series labels with more than one filter at once.
Parameters
----------
data: DataFrame or Series
DataFrame or Series to filter labels on.
like : list of str
Keep labels from axis for which "like in label == True".
regex : list of str
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
Dataframe or Series
Subset of `data`.
"""
if like and regex:
raise ValueError("Cannot pass both `like` and `regex`")
elif like:
if isinstance(like, str):
like = [like]
for exp in like:
data = data.filter(like=exp, axis=axis)
    elif regex:
        if isinstance(regex, str):
            regex = [regex]
        for exp in regex:
            data = data.filter(regex=exp, axis=axis)
    else:
        raise ValueError("Must pass either `like` or `regex`")
return data
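# Usage sketch (hypothetical columns, not from this module): each pattern in
# `like` is applied in sequence, so the result is the intersection of the filters.
#
#   df = pd.DataFrame({"unit_price": [1.0], "unit_qty": [2], "total": [3.0]})
#   filter_pipe(df, like=["unit", "price"], axis=1)
#   # equivalent to df.filter(like="unit", axis=1).filter(like="price", axis=1)
#   # -> keeps only the 'unit_price' column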
def title(snake_case: str):
"""Format snake case string as title."""
return snake_case.replace("_", " ").strip().title()
def title_mode(data: pd.DataFrame):
"""Return copy of `data` with strings formatted as titles."""
result = data.copy()
result.update(result.select_dtypes("object").applymap(title))
for label, column in result.select_dtypes("category").items():
result[label] = column.cat.rename_categories(title)
if result.columns.dtype == "object":
result.columns = result.columns.map(title)
if result.index.dtype == "object":
result.index = result.index.map(title)
return result
def cartesian(*arrays: ArrayLike) -> np.ndarray:
"""Returns the Cartesian product of some 1d arrays.
Returns
-------
ndarray
Cartesian product.
"""
arrays = list(arrays)
for i, array in enumerate(arrays):
array = np.asarray(array)
arrays[i] = array
_check_1d(array)
return np.array(np.meshgrid(*arrays)).T.reshape(-1, len(arrays))
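# Worked example (assumed inputs): two 1d arrays of length 2 give a (4, 2)
# product, one row per combination.
#
#   cartesian([1, 2], [10, 20])
#   # -> array([[ 1, 10],
#   #           [ 1, 20],
#   #           [ 2, 10],
#   #           [ 2, 20]])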
def broad_corr(frame: pd.DataFrame, other: pd.DataFrame) -> pd.DataFrame:
"""Get correlations between features of one frame with those of another.
Parameters
----------
frame : DataFrame
First DataFrame.
other : DataFrame
Second DataFrame.
Returns
-------
DataFrame
Pearson correlations.
"""
return other.apply(lambda x: frame.corrwith(x))
def swap_index(data: pd.Series) -> pd.Series:
"""Swap index and values.
Parameters
----------
data : Series
Series for swapping index and values.
Returns
-------
Series
Swapped Series.
"""
return pd.Series(data.index, index=data.values, name=data.name, copy=True)
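# Usage sketch (hypothetical Series): labels and values simply trade places,
# while the name is preserved.
#
#   s = pd.Series([10, 20], index=["a", "b"], name="score")
#   swap_index(s)
#   # -> index [10, 20], values ['a', 'b'], name 'score'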
def explicit_sort(
data: FrameOrSeries,
*,
order: list,
mode: str = "values",
inplace: bool = False,
**kwargs,
) -> FrameOrSeries:
"""Sort DataFrame or Series values in explicitly specified order.
Parameters
----------
data : FrameOrSeries
Data structure to sort.
order : list
List specifying sort order.
mode : str, optional
Whether to sort 'values' (default) or 'index'.
inplace : bool, optional
Perform operation in place; False by default.
Returns
-------
FrameOrSeries
Sorted data structure or None if `inplace` is set.
"""
order = list(order)
mode = mode.lower()
if mode not in {"values", "index"}:
raise ValueError("`mode` must be 'values' or 'index'")
# Define vectorized key function
get_rank = np.vectorize(lambda x: order.index(x))
# Sort according to mode
if mode == "values":
data = data.sort_values(key=get_rank, inplace=inplace, **kwargs)
else:
data = data.sort_index(key=get_rank, inplace=inplace, **kwargs)
# Return copy or None
return data
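# Usage sketch (hypothetical Series): values are ranked by their position in
# `order`, not by their natural ordering.
#
#   s = pd.Series([3, 1, 2], index=["c", "a", "b"])
#   explicit_sort(s, order=[2, 3, 1])
#   # -> values [2, 3, 1] with index ['b', 'c', 'a']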
def bitgen(seed: Union[None, int, ArrayLike] = None):
return np.random.default_rng(seed).bit_generator
@singledispatch
def get_func_name(
func: Union[
Callable,
FunctionTransformer,
Collection[Callable],
Collection[FunctionTransformer],
]
) -> Union[str, Collection[str]]:
"""Get function name(s) from function-like objects.
Parameters
----------
func : Callable, FunctionTransformer, collection of
Function-like object(s) to get names of.
Returns
-------
str or collection of
Function name(s).
"""
if hasattr(func, "pyfunc"):
name = get_func_name(func.pyfunc)
elif hasattr(func, "func"):
name = get_func_name(func.func)
elif hasattr(func, "__wrapped__"):
name = get_func_name(func.__wrapped__)
elif isinstance(func, Callable):
name = func.__name__
else:
raise TypeError(
f"Expected Callable or FunctionTransformer but encountered {type(func)}."
)
return name
@get_func_name.register
def _(func: FunctionTransformer) -> str:
return get_func_name(func.func)
@get_func_name.register
def _(func: Series) -> pd.Series:
return func.map(get_func_name)
@get_func_name.register
def _(func: ndarray) -> ndarray:
return flat_map(get_func_name, func)
@get_func_name.register
def _(func: list) -> list:
return [get_func_name(x) for x in func]
@singledispatch
def implode(
data: FrameOrSeries, column: Union[str, List[str]] = None, allow_dups=False
) -> FrameOrSeries:
"""Retract "exploded" DataFrame or Series into container of nested lists.
Parameters
----------
data : DataFrame or Series
Exploded data structure.
Returns
-------
DataFrame or Series (same as input)
Frame with values retracted into list-likes.
"""
raise TypeError(f"Expected DataFrame or Series, got {type(data).__name__}.")
@implode.register
def _(data: Series, column: Union[str, List[str]] = None, allow_dups=False) -> Series:
"""Dispatch for Series."""
if not allow_dups:
data = (
data.reset_index()
.drop_duplicates()
.set_index(data.index.name or "index")
.squeeze()
)
return data.groupby(data.index).agg(lambda x: x.to_list())
@implode.register
def _(
data: DataFrame, columns: Union[str, List[str]] = None, allow_dups=False
) -> DataFrame:
"""Dispatch for DataFrame"""
if columns is None:
raise ValueError("Must pass `columns` if input is DataFrame.")
if isinstance(columns, str):
columns = [columns]
imploded = {x: implode(data.loc[:, x], allow_dups=allow_dups) for x in columns}
data = data.loc[~data.index.duplicated()].copy()
return data.assign(**imploded)
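# Usage sketch (hypothetical "exploded" frame): rows sharing an index label are
# retracted back into list cells for the requested column(s).
#
#   df = pd.DataFrame({"tag": ["a", "b", "c"], "user": ["u1", "u1", "u2"]},
#                     index=[0, 0, 1])
#   implode(df, "tag")
#   # -> one row per index label, with tag == [['a', 'b'], ['c']]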
@singledispatch
def expand(
data: Union[DataFrame, Series], column: str = None, labels: List[str] = None
) -> DataFrame:
"""Expand a column of length-N list-likes into N columns.
Parameters
----------
data : Series or DataFrame
Series or DataFrame with column to expand.
column : str, optional
Column of length-N list-likes to expand into N columns, by default None.
Only relevant for DataFrame input.
labels : list of str, optional
Labels for new columns (must provide N labels), by default None
Returns
-------
DataFrame
Expanded frame.
"""
# This is the fallback dispatch.
raise TypeError(f"Expected Series or DataFrame, got {type(data)}.")
@expand.register
def _(data: Series, column: str = None, labels: List[str] = None) -> DataFrame:
"""Dispatch for Series. Expands into DataFrame."""
if not data.map(is_list_like).all():
raise ValueError("Elements must all be list-like")
lengths = data.str.len()
if not (lengths == lengths.iloc[0]).all():
raise ValueError("List-likes must all be same length")
col_data = list(zip(*data))
if labels is not None:
if len(labels) != len(col_data):
raise ValueError("Number of `labels` must equal number of new columns")
else:
labels = range(len(col_data))
if data.name is not None:
labels = [f"{data.name}_{x}" for x in labels]
col_data = dict(zip(labels, col_data))
return DataFrame(col_data, index=data.index)
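# Usage sketch (hypothetical Series of 2-tuples): each element is split into
# its own column, with labels derived from the Series name.
#
#   s = pd.Series([(1, 2), (3, 4)], name="pt")
#   expand(s)
#   # -> DataFrame with columns 'pt_0' and 'pt_1'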
@expand.register
def _(data: DataFrame, column: str = None, labels: List[str] = None) -> DataFrame:
"""Dispatch for DataFrame. Returns DataFrame."""
    if column is None:
        raise ValueError("Must pass `column` if input is DataFrame")
    if data.columns.value_counts()[column] > 1:
        raise ValueError("`column` must be unique in DataFrame")
expanded = expand(data.loc[:, column], labels=labels)
insert_at = data.columns.get_loc(column)
data = data.drop(columns=column)
for i, label in enumerate(expanded.columns):
data.insert(
insert_at + i, label, expanded.loc[:, label], allow_duplicates=False
)
return data
def flat_map(func: Callable, arr: np.ndarray, **kwargs):
# Record shape
shape = arr.shape
# Make list
flat = [func(x, **kwargs) for x in arr.flat]
# Construct flat array
arr = np.array(flat, dtype=arr.dtype)
# Reshape in original shape
return arr.reshape(shape)
@singledispatch
def prune_categories(
data: FrameOrSeries,
column: str = None,
cut=None,
qcut=None,
inclusive=True,
show_report=True,
):
raise TypeError(f"`data` must be Series or DataFrame, got {type(data).__name__}.")
@prune_categories.register
def _(
data: Series,
column: str = None,
cut=None,
qcut=None,
inclusive=True,
show_report=True,
):
if column is not None:
raise UserWarning("Param `column` is irrelevant for Series input.")
if cut is not None:
if isinstance(cut, float):
assert 0.0 <= cut <= 1.0
counts = data.value_counts(True)
elif isinstance(cut, int):
assert 0 <= cut <= data.size
counts = data.value_counts()
elif qcut is not None:
assert 0.0 <= qcut <= 1.0
counts = data.value_counts()
cut = counts.quantile(qcut)
else:
raise ValueError("Must provide either `cut` or `qcut`.")
# Slice out categories to keep
keep = counts.loc[counts >= cut if inclusive else counts > cut]
keep = set(keep.index)
data = data.loc[data.isin(keep)].copy()
# Remove unused categories if necessary
if is_categorical_dtype(data):
data = data.cat.remove_unused_categories()
if show_report:
if set(counts.index) == keep:
print("No categories dropped.\n")
else:
report = counts.to_frame("Support")
status = pd.Series(data="dropped", index=counts.index, name="Status")
            status[list(keep)] = "retained"
            report = pd.merge(status, report, left_index=True, right_index=True)
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
    groupdata = pd.read_table(stream, sep=" ")
"""
Database connections and engines.
|pic1| |pic2| |pic3| |pic4|
.. |pic1| image:: ../images_source/db_etl_tools/oracle1.png
:width: 20%
.. |pic2| image:: ../images_source/db_etl_tools/postgres1.png
:width: 20%
.. |pic3| image:: ../images_source/db_etl_tools/teradata.png
:width: 20%
.. |pic4| image:: ../images_source/db_etl_tools/redshift1.png
:width: 20%
"""
from datetime import datetime
import os
import re
import time
import cx_Oracle
import numpy as np
import pandas as pd
import psycopg2
from colorama import Fore
from sqlalchemy import text
import psycopg2.extras
from fusetools.text_tools import Export
class Generic:
"""
Generic functions for SQL queries and ETL.
"""
@classmethod
def make_groupby(cls, sql, dim_fact_delim):
"""
        Creates a dynamically generated GROUP BY clause for a given SQL statement.
:param sql: SQL statement provided.
:param dim_fact_delim: Delimiter between selected columns.
:return: A complete SQL statement with dynamically generated GROUP BY clause.
"""
dim_segs_ = []
for idxx, d in enumerate(sql.replace("\n", "").split("SELECT")[1].split(dim_fact_delim)[0].split(", ")):
if d.strip() != '':
dim_segs_.append(d.split(" as ")[1].strip())
sql_all = sql + " GROUP BY " + ', '.join(dim_segs_)
sql_all = sql_all.replace("\n", " ").replace('"', "")
return sql_all
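    # Illustrative call (hypothetical query and delimiter, not from this module):
    # every "<expr> as <alias>" before the delimiter becomes a GROUP BY column.
    #
    #   sql = "SELECT t.region as region, t.channel as channel, SUM(t.sales) as total_sales FROM sales t"
    #   Generic.make_groupby(sql, "SUM")
    #   # -> "... FROM sales t GROUP BY region, channel"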
@classmethod
def make_db_schema(cls, df):
"""
Creates a mapping of Pandas data types to SQL data types.
:param df: A Pandas DataFrame with column types to be converted.
:return: A Pandas DataFrame of columns with corresponding SQL data types.
"""
cols = []
dtypes = []
for col in df.columns:
cols.append(col)
col_series = df[col].replace(r'^\s*$', np.nan, regex=True)
col_series = col_series.dropna()
try:
date_len = max(col_series.astype("str").str[:10].str.split("-").apply(lambda x: len(x)))
if date_len == 3:
dtypes.append("datetime64[ns]")
continue
except:
date_len = 0
            try:
                is_int = col_series.astype("float").apply(float.is_integer).all()
            except:
                dtypes.append("object")
                continue
            if is_int and date_len != 3:
                dtype = "Int64"
            elif not is_int and date_len != 3:
                dtype = "float"
            elif date_len == 3:
                dtype = "datetime64[ns]"
            else:
                dtype = "object"
dtypes.append(dtype)
schema_df = pd.DataFrame({"col": cols, "dtype_new": dtypes})
old_schema_df = pd.DataFrame(df.dtypes, columns=["dtype_old"]).reset_index()
schema_df2 = pd.merge(schema_df, old_schema_df, how="inner", left_on="col", right_on="index")
schema_df2['dtype_final'] = np.where(
schema_df2['dtype_new'] != "object",
schema_df2['dtype_new'],
schema_df2['dtype_old']
)
return schema_df2
@classmethod
def db_apply_schema(cls, df, schema_df):
"""
Converts Pandas DataFrame columns based on schema DataFrame provided.
:param df: A Pandas DataFrame with column types to be converted.
:param schema_df: A Pandas DataFrame of columns with corresponding SQL data types.
:return: Pandas DataFrame with columns converted to SQL schema.
"""
df_ret = df
df_ret = df_ret.replace(r'^\s*$', np.nan, regex=True)
df_ret = df_ret.replace('', np.nan, regex=True)
        df_ret = df_ret.replace({np.nan: None})
        for idx, row in schema_df.iterrows():
            if row['dtype_final'] == "Int64":
                df_ret[row['col']] = df_ret[row['col']].replace({np.nan: None})
                df_ret[row['col']] = df_ret[row['col']].astype(float).astype("Int64")
            elif row['dtype_final'] == "datetime64[ns]":
                df_ret[row['col']] = pd.to_datetime(df_ret[row['col']], errors="coerce")
            else:
                df_ret[row['col']] = df_ret[row['col']].replace({np.nan: None})
                df_ret[row['col']] = df_ret[row['col']].astype(row['dtype_final'])
return df_ret
@classmethod
def make_db_cols(cls, df):
"""
Returns a Pandas DataFrame column names that are converted for database standards.
:param df: A Pandas DataFrame with columns to be transformed
:return: Pandas DataFrame column names that are converted for database standards.
"""
columns = [re.sub('#', 'num', col) for col in df.columns]
columns = [re.sub('%', 'pct', col) for col in columns]
columns = [re.sub('[^a-zA-Z0-9]+', ' ', col) for col in columns]
columns = [col.replace(" ", "_") for col in columns]
columns = [col[:200] for col in columns]
columns = [col.lower() for col in columns]
columns = [c.lstrip("_").rstrip("_") for c in columns]
df.columns = columns
return df
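    # Illustrative call (hypothetical columns): symbols are spelled out and the
    # names are snake_cased for database use.
    #
    #   df = pd.DataFrame(columns=["Profit %", "Item #"])
    #   Generic.make_db_cols(df).columns
    #   # -> ['profit_pct', 'item_num']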
@classmethod
def run_query(cls, engine, sql):
"""
Executes a SQL query.
:param engine: A database engine object.
:param sql: A SQL statement to be executed.
        :return: None; prints the query runtime in minutes.
"""
rptg_tstart = datetime.now()
engine.execute(sql)
rptg_tend = datetime.now()
tdelta = rptg_tend - rptg_tstart
tdelta = tdelta.total_seconds() / 60
        print(Fore.RED + f"Runtime: {tdelta:.2f} minutes")
class Oracle:
"""
Generic functions for Oracle SQL queries and ETL.
.. image:: ../images_source/db_etl_tools/oracle1.png
"""
@classmethod
def make_tbl(cls, df, tbl_name):
"""
Provides a CREATE TABLE SQL statement for a given Pandas DataFrame.
:param df: A Pandas DataFrame to be added as an Oracle table.
:param tbl_name: Oracle table name to be created.
:return: CREATE TABLE SQL statement.
"""
for idx, col in enumerate(df):
col_desc = col + "-" + str(df[col].map(lambda x: len(str(x))).max())
if idx == 0:
col_desc_all = [col_desc]
else:
col_desc_all.append(col_desc)
col_desc_all = pd.DataFrame(col_desc_all)
col_desc_all.columns = ["char"]
        col_desc_all[['column', 'length']] = col_desc_all['char'].str.split('-', n=1, expand=True)
col_desc_types = pd.DataFrame(df.dtypes).reset_index()
col_desc_types.columns = ["column", "type"]
col_desc_all = pd.merge(
col_desc_all,
col_desc_types,
how="inner",
on="column")
d = {'object': 'VARCHAR',
'int64': 'NUMBER',
'float64': 'VARCHAR',
'datetime64[ns]': 'VARCHAR'}
col_desc_all = col_desc_all.replace(d)
col_desc_all['concat'] = np.where(col_desc_all['type'] != "NUMBER",
col_desc_all['column'] + " " + col_desc_all['type'] + "(" + col_desc_all[
'length'] + ")",
col_desc_all['column'] + " " + col_desc_all['type'])
col_desc_all = col_desc_all.apply(', '.join).reset_index()
col_desc_all.columns = ["index", "statement"]
statement = col_desc_all[col_desc_all['index'] == 'concat']
sql = statement['statement'].values
sql = str(sql)
sql = sql.replace("[", "")
sql = sql.replace("]", "")
sql = "CREATE TABLE " + tbl_name + " ( " + sql + " )"
sql = sql.replace("'", "")
return sql
@classmethod
def insert_tbl(cls, df, tbl_name):
"""
Executes an INSERT INTO statement for a given Pandas DataFrame.
:param df: A Pandas DataFrame with values to be inserted.
:param tbl_name: An Oracle table for Pandas DataFrame to be inserted into.
:return: SQL for INSERT INTO statement.
"""
sql = 'INSERT INTO ' + tbl_name + '(' + ', '.join(df.columns) + ') VALUES (' + ''.join(
[':' + str(v) + ', ' for v in list(range(1, len(df.columns)))]) + ':' + str(len(df.columns)) + ')'
return sql
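    # Illustrative output (hypothetical table and frame): one bind variable per
    # column, numbered for cx_Oracle's executemany.
    #
    #   df = pd.DataFrame({"id": [1], "name": ["WR 104"]})
    #   Oracle.insert_tbl(df, "wr_stars")
    #   # -> "INSERT INTO wr_stars(id, name) VALUES (:1, :2)"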
@classmethod
def insert_exec(cls, sql, conn, df):
"""
Executes a provided SQL statement.
:param sql: A provided SQL query.
:param conn: A database connection.
:param df: A Pandas DataFrame.
:return: Nothing.
"""
cursor = cx_Oracle.Cursor(conn)
cursor.prepare(sql)
cursor.executemany(None, df.values.tolist())
conn.commit()
cursor.close()
# conn.close()
@classmethod
def make_tbl_complete_force(cls, df, tbl_name, eng, conn, attempt_n,
subcols=False, chunks=False, chunks_delay=False):
"""
Executes a series of SQL statements to CREATE and INSERT into a table from a Pandas DataFrame.
:param df: Pandas DataFrame to create a table from.
:param tbl_name: Name of table to be created.
:param eng: Oracle database engine object.
:param conn: Oracle database connection object.
:param attempt_n: Number of times to attempt to run INSERT statement.
:param subcols: A list of columns of the Pandas DataFrame to apply operations on.
:param chunks: Number of chunks to split Pandas DataFrame into.
:param chunks_delay: Delay between chunk's INSERT statement.
:return: Print statements outline sequential SQL statements executed.
"""
if len(df) > 0:
if subcols:
df = df[subcols]
df.fillna(' ', inplace=True)
df = df.astype(str)
# make create table sql
sql = cls.make_tbl(df, tbl_name)
print(sql)
# drop table
try:
eng.execute("drop table " + str(tbl_name))
except Exception as e:
print(str(e))
pass
# create table
eng.execute(sql)
# split large df into chunks
if chunks:
df_split = np.array_split(df, chunks)
for sub in df_split:
# make insert table sql
sql = cls.insert_tbl(sub, tbl_name)
print(sql)
# execute insert statement
# add try counter
attempts = attempt_n
                    while attempts > 0:
                        try:
                            cls.insert_exec(sql, conn, sub)
                            break  # stop retrying once the insert succeeds
                        except:
                            attempts -= 1
                            print(Fore.RED + f"Failed upload attempt...{attempts} remaining.")
                            time.sleep(1)
                    if chunks_delay:
                        time.sleep(chunks_delay)
                    else:
                        time.sleep(2)
else:
# make insert table sql
sql = cls.insert_tbl(df, tbl_name)
print(sql)
# execute insert statement
cls.insert_exec(sql, conn, df)
@classmethod
def make_tbl_complete(cls, df, tbl_name, eng, conn, subcols=False, chunks=False, chunks_delay=False):
"""
Executes a series of SQL statements to CREATE and INSERT into a table from a Pandas DataFrame.
:param df: Pandas DataFrame to create a table from.
:param tbl_name: Name of table to be created.
:param eng: Oracle database engine object.
:param conn: Oracle database connection object.
:param subcols: A list of columns of the Pandas DataFrame to apply operations on.
:param chunks: Number of chunks to split Pandas DataFrame into.
:param chunks_delay: Delay between chunk's INSERT statement.
:return: Print statements outline sequential SQL statements executed.
"""
if len(df) > 0:
if subcols:
df = df[subcols]
df.fillna(' ', inplace=True)
df = df.astype(str)
# make create table sql
sql = cls.make_tbl(df, tbl_name)
print(sql)
# drop table
try:
eng.execute("drop table " + str(tbl_name))
except:
pass
# create table
eng.execute(sql)
# split large df into chunks
if chunks:
df_split = np.array_split(df, chunks)
for sub in df_split:
# make insert table sql
sql = cls.insert_tbl(sub, tbl_name)
print(sql)
# execute insert statement
cls.insert_exec(sql, conn, sub)
if chunks_delay:
time.sleep(chunks_delay)
else:
time.sleep(2)
else:
# make insert table sql
sql = cls.insert_tbl(df, tbl_name)
print(sql)
# execute insert statement
cls.insert_exec(sql, conn, df)
@classmethod
def get_oracle_date(cls, date):
"""
        Converts a date string to an Oracle date of format "DD-MMM-YY".
:param date: A provided date.
:return: An Oracle database date.
"""
# given a datetime YYYY-MM-DD
if "-" in date:
year, month, day = str(pd.to_datetime(date)).split("-")
year = year[2:]
day = day.replace(" 00:00:00", "")
month_name = {
'01': 'JAN',
'02': 'FEB',
'03': 'MAR',
'04': 'APR',
'05': 'MAY',
'06': 'JUN',
'07': 'JUL',
'08': 'AUG',
'09': 'SEP',
'10': 'OCT',
'11': 'NOV',
'12': 'DEC'}
month = month_name.get(month)
date = day + "-" + month + "-" + year
# given an excel date
elif "/" in date:
date = str(pd.to_datetime(date)).replace(" 00:00:00", "")
year, month, day = str(pd.to_datetime(date)).split("-")
year = year[2:]
day = day.replace(" 00:00:00", "")
month_name = {
'01': 'JAN',
'02': 'FEB',
'03': 'MAR',
'04': 'APR',
'05': 'MAY',
'06': 'JUN',
'07': 'JUL',
'08': 'AUG',
'09': 'SEP',
'10': 'OCT',
'11': 'NOV',
'12': 'DEC'}
month = month_name.get(month)
date = day + "-" + month + "-" + year
return date
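    # Worked example (assumed ISO input): the year is truncated to two digits
    # and the month mapped to its abbreviation.
    #
    #   Oracle.get_oracle_date("2021-03-05")
    #   # -> "05-MAR-21"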
@classmethod
def get_orcl_date(cls, dat):
"""
        Converts a datetime column to Oracle date strings of format "DD-MMM-YY".
:param dat: A provided date column of a Pandas Series.
:return: An Oracle database date.
"""
dat['mon'] = dat.dt.month
dat['day'] = dat.dt.day
# .astype(str).str.pad(width=2, fillchar="0", side="left")
dat['year'] = dat.dt.year
mon_abbrevs = {
1: 'JAN',
2: 'FEB',
3: 'MAR',
4: 'APR',
5: 'MAY',
6: 'JUN',
7: 'JUL',
8: 'AUG',
9: 'SEP',
10: 'OCT',
11: 'NOV',
12: 'DEC'}
dat['mon_abbrevs'] = \
dat['mon'].map(mon_abbrevs)
dat['day'] = dat['day'].str[:-2]
dat['year'] = dat['year'].astype(str).str[:4]
dat['year'] = dat['year'].astype(str).str[-2:]
dat['date_comb'] = \
dat['day'].astype(str) + "-" + dat['mon_abbrevs'].astype(str) + "-" + dat['year'].astype(str)
return dat['date_comb']
@classmethod
def orcl_tbl_varchar_convert(cls, tbl_name, convert_cols, engine):
"""
Converts a set of columns to VARCHAR(300) for a given Oracle table.
:param tbl_name: Oracle table name.
:param convert_cols: List of columns to convert.
:param engine: Oracle database engine.
:return: Printed ALTER table statements for each column.
"""
# loop through
for col in convert_cols:
sql = f'''
alter table {tbl_name}
modify {col} varchar(300)
'''
print(sql)
engine.execute(text(sql).execution_options(autocommit=True))
time.sleep(1)
class Postgres:
"""
Generic functions for Postgres SQL queries and ETL.
.. image:: ../images_source/db_etl_tools/postgres1.png
"""
@classmethod
def run_query_pg(cls, conn, sql):
"""
Executes a SQL statement with a Postgres database connection.
        :param conn: Postgres database connection object.
        :param sql: SQL statement to execute.
        :return: None; prints the query runtime in minutes.
"""
rptg_tstart = datetime.now()
cur = conn.cursor()
cur.execute(sql)
conn.commit()
rptg_tend = datetime.now()
tdelta = rptg_tend - rptg_tstart
tdelta = tdelta.total_seconds() / 60
        print(Fore.RED + f"Runtime: {tdelta:.2f} minutes")
@classmethod
def insert_val_pg(cls, col_list, val_list, tbl_name):
"""
Creates SQL to run an INSERT operation of a given Postgres table.
:param col_list: List of columns to INSERT or UPDATE.
:param val_list: List of values to INSERT or UPDATE.
:param tbl_name: Name of Postgres table.
:return: SQL to run an INSERT statement.
"""
sql = f'''
INSERT INTO {tbl_name}
(
{str(col_list).replace("[", "").replace("]", "").replace("'", "")}
) values (
{str(val_list).replace("[", "").replace("]", "")}
)
'''
return sql
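    # Illustrative output (hypothetical table and values): string values keep
    # their quotes from the Python repr, column names do not.
    #
    #   Postgres.insert_val_pg(["id", "name"], [7, 'jo'], "users")
    #   # -> INSERT INTO users ( id, name ) values ( 7, 'jo' )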
@classmethod
def upsert_val_pg(cls, col_list, val_list, tbl_name, constraint_col):
"""
Creates SQL to run an UPSERT (INSERT new records or UPDATE existing records) operation of a given Postgres table.
:param col_list: List of columns to INSERT or UPDATE.
:param val_list: List of values to INSERT or UPDATE.
:param constraint_col: Column/value logic to check against for INSERT or UPDATE.
:param tbl_name: Name of Postgres table.
:return: SQL to run an UPSERT statement.
"""
update = ""
for idx, col in zip(col_list, val_list):
update = update + idx + f"='{col}',"
update = update[:update.rfind(",")]
sql = f'''
INSERT INTO {tbl_name}
({str(col_list).replace("[", "").replace("]", "").replace("'", "")})
VALUES
({str(val_list).replace("[", "").replace("]", "")})
ON CONFLICT ({constraint_col})
DO
UPDATE SET
{update}
'''
return sql
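    # Rough sketch of the generated statement (hypothetical table/columns). Note the UPDATE SET
    # clause wraps every value in single quotes, so it suits text-like columns best:
    #
    #   Postgres.upsert_val_pg(col_list=["id", "name"], val_list=[1, "bob"],
    #                          tbl_name="my_tbl", constraint_col="id")
    #   # ~ INSERT INTO my_tbl (id, name) VALUES (1, 'bob')
    #   #   ON CONFLICT (id) DO UPDATE SET id='1',name='bob'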
@classmethod
def upsert_tbl_pg(cls, src_tbl, tgt_tbl, src_join_cols, src_insert_cols,
src_update_cols=False, update_compare_cols=False):
"""
Creates SQL to run an UPSERT (INSERT new records or UPDATE existing records) operation of a given Postgres table.
:param src_tbl: Postgres source table that contains data to be merged from.
:param tgt_tbl: Postgres target table to receive UPSERT operation.
:param src_join_cols: Columns to use to join source and target tables.
:param src_insert_cols: Columns to be inserted from source table.
:param src_update_cols: Columns to be updated from source table.
:param update_compare_cols: Columns to use to compare values across source and target tables.
:return: A SQL Insert statement and a SQL Update statement.
"""
src_join_cols_ = (
str([f"t.{c} = s.{c} AND "
for c in src_join_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
.replace(",", "")
)
src_join_cols_ = src_join_cols_[:src_join_cols_.rfind("AND")]
src_join_cols_f = (
str([f"t.{c} IS NULL AND "
for c in src_join_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
.replace(",", "")
)
src_join_cols_f = src_join_cols_f[:src_join_cols_f.rfind("AND")]
src_insert_cols_ = (
str([f"s.{c}"
for c in src_insert_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
)
if src_update_cols:
src_update_cols_ = (
str([f"{c} = s.{c},"
for c in src_update_cols])
.replace("[", "")
.replace("]", "")
.replace("', '", "")
.replace("'", "")
)
src_update_cols_ = src_update_cols_[:src_update_cols_.rfind(",")]
# update join statement
src_join_cols2_ = src_join_cols_.replace("t.", f"{tgt_tbl}.")
if update_compare_cols:
update_compare_cols_ = (
str([f"s.{c} != {tgt_tbl}.{c},"
for c in update_compare_cols])
.replace("[", "")
.replace("]", "")
.replace("', '", "")
.replace("'", "")
)
update_compare_cols_ = update_compare_cols_[:update_compare_cols_.rfind(",")]
src_join_cols2_ = src_join_cols2_ + " AND " + update_compare_cols_
# src_join_cols2_ = src_join_cols2_.replace("t.", f"{tgt_tbl}.")
# https://dwgeek.com/amazon-redshift-merge-statement-alternative-and-example.html/
sql_update = f'''
/* Update records*/
UPDATE {tgt_tbl}
SET {src_update_cols_}
FROM {src_tbl} s
WHERE {src_join_cols2_}
'''.replace("\n", " ")
else:
sql_update = ""
sql_insert = f'''
/* Insert records*/
INSERT INTO {tgt_tbl}
SELECT {src_insert_cols_}
FROM {src_tbl} s
LEFT JOIN {tgt_tbl} t
ON {src_join_cols_}
WHERE {src_join_cols_f}
'''.replace("\n", " ")
return sql_update, sql_insert
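    # Hypothetical example of the two statements this returns (staging/target names are made up).
    # The UPDATE is produced only when src_update_cols is given; the INSERT uses a
    # LEFT JOIN ... IS NULL anti-join to add rows missing from the target:
    #
    #   upd, ins = Postgres.upsert_tbl_pg(
    #       src_tbl="stg_orders", tgt_tbl="orders",
    #       src_join_cols=["order_id"],
    #       src_insert_cols=["order_id", "amount"],
    #       src_update_cols=["amount"])
    #   # upd ~ UPDATE orders SET amount = s.amount FROM stg_orders s WHERE orders.order_id = s.order_id
    #   # ins ~ INSERT INTO orders SELECT s.order_id, s.amount FROM stg_orders s
    #   #       LEFT JOIN orders t ON t.order_id = s.order_id WHERE t.order_id IS NULL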
@classmethod
def make_df_tbl_pg(cls, tbl_name, df):
"""
Creates SQL to run a CREATE TABLE statement based on a Pandas DataFrame.
:param tbl_name: Postgres table name.
:param df: Pandas DataFrame.
:return: CREATE TABLE SQL statement.
"""
# fix columns
df = Generic.make_db_cols(df)
# loop thru the columns
for idx, col in enumerate(df):
# find the max length of each field
col_desc = col + "-" + str(df[col].map(lambda x: len(str(x))).max())
# find the max value of each fields
try:
col_max = col + "-" + str(max(df[col]))
except:
col_max = col + "-" + 'NA'
if idx == 0:
col_desc_all = [col_desc]
col_max_all = [col_max]
else:
col_desc_all.append(col_desc)
col_max_all.append(col_max)
# make df of column lengths
col_desc_all = pd.DataFrame(col_desc_all)
col_desc_all.columns = ["char"]
col_desc_all['column'], col_desc_all['length'] = \
col_desc_all['char'].str.split('-', 1).str
# make df of column max
col_max_all = pd.DataFrame(col_max_all)
col_max_all.columns = ["char"]
col_max_all['column'], col_max_all['max'] = \
col_max_all['char'].str.split('-', 1).str
# make df of column dtypes
col_desc_types = pd.DataFrame(df.dtypes).reset_index()
col_desc_types.columns = ["column", "type"]
# join dfs
col_desc_all = pd.merge(
col_desc_all,
col_desc_types,
how="inner",
on="column")
col_desc_all = pd.merge(
col_desc_all,
col_max_all[["column", "max"]],
how="inner",
on="column")
        # define data type mapping (pandas --> Postgres)
d = {'object': 'VARCHAR',
'int64': 'INTEGER',
'Int64': 'INTEGER',
'int32': 'INTEGER',
'bool': 'VARCHAR',
'float64': 'FLOAT',
'datetime64[ns]': 'TIMESTAMP',
"datetime64[ns, UTC]": "TIMESTAMP"}
col_desc_all = col_desc_all.astype(str).replace(d)
# list the columns where you want to specify the lengths
col_desc_all['concat'] = np.where(
# if varchar, use the length of the longest char
col_desc_all['type'] == "VARCHAR",
col_desc_all['column'] + " " + \
col_desc_all['type'].astype(str) + \
"(" + col_desc_all['length'] + ")",
col_desc_all['column'] + " " + \
col_desc_all['type'].astype(str))
# convert integers with a max val over certain amount to varchar
for idx, row in col_desc_all.iterrows():
if str(row['type']) == 'INTEGER' and row['max'] != "nan" and int(row['max']) > 2147483647:
val = row['concat']
col_desc_all.loc[idx, 'concat'] = \
val.replace(
" INTEGER",
f" VARCHAR({row['length']})")
col_desc_all = col_desc_all.apply(', '.join).reset_index()
col_desc_all.columns = ["index", "statement"]
statement = col_desc_all[col_desc_all['index'] == 'concat']
sql = statement['statement'].values
sql = str(sql)
sql = sql.replace("[", "")
sql = sql.replace("]", "")
sql = "CREATE TABLE " + tbl_name + " ( " + sql + " )"
sql = sql.replace("'", "")
return sql
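    # Sketch of the kind of DDL this produces for a small hypothetical DataFrame (assuming
    # Generic.make_db_cols leaves these column names unchanged):
    #
    #   df = pd.DataFrame({"city": ["Norfolk"], "gwl_ft": [3.2],
    #                      "obs_dt": pd.to_datetime(["2020-01-01"])})
    #   Postgres.make_df_tbl_pg("gwl_obs", df)
    #   # ~ CREATE TABLE gwl_obs ( city VARCHAR(7), gwl_ft FLOAT, obs_dt TIMESTAMP )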
@classmethod
def insert_df_pg(cls, cursor, conn, df, tbl_name):
"""
        Executes an INSERT INTO statement for a given Pandas DataFrame into a Postgres table.
:param cursor: Postgres database cursor object.
:param conn: Postgres database connection object.
:param df: Pandas DataFrame to insert into a Postgres table.
:param tbl_name: Postgres table name.
:return: Elapsed time to execute query.
"""
        df_load = df.replace({np.nan: None})  # pd.np was removed from pandas; use numpy directly
df_load = df_load.round(3)
df_columns = list(df_load)
# create (col1,col2,...)
columns = ",".join(df_columns)
values = "VALUES({})".format(",".join(["%s" for _ in df_columns]))
insert_stmt = "INSERT INTO {} ({}) {}".format(tbl_name, columns, values)
rptg_tstart = datetime.now()
psycopg2.extras.execute_batch(cursor, insert_stmt, df_load.values)
conn.commit()
rptg_tend = datetime.now()
tdelta = rptg_tend - rptg_tstart
tdelta = tdelta.total_seconds() / 60
        print(Fore.RED + f"Runtime: {tdelta} min")
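    # Typical call pattern (a psycopg2 connection and cursor are assumed to already exist;
    # credentials below are placeholders):
    #
    #   conn = psycopg2.connect(...)   # hypothetical connection details
    #   cur = conn.cursor()
    #   Postgres.insert_df_pg(cursor=cur, conn=conn, df=my_df, tbl_name="gwl_obs")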
@classmethod
def make_tbl_complete_pg(cls, df, tbl_name, conn, cursor, batch_size=False):
"""
Executes a series of SQL statements to CREATE and INSERT into a table from a Pandas DataFrame.
:param df: Pandas DataFrame to create a table from.
:param tbl_name: Name of table to be created.
:param conn: Postgres database connection object.
:param cursor: Postgres database cursor object.
:param batch_size: Records to load per batch.
:return: Elapsed time to execute query.
"""
# 1 drop the table
print(f"dropping table: {tbl_name}")
try:
cls.run_query_pg(sql=f"drop table {tbl_name}", conn=conn)
except:
print(f"table doesn't exist: {tbl_name}")
pass
# create the table
print(f"creating table: {tbl_name}")
        sql = cls.make_df_tbl_pg(df=df, tbl_name=tbl_name)
print(sql)
cls.run_query_pg(sql=sql, conn=conn)
print(f"inserting DF values into table: {tbl_name}")
rptg_tstart = datetime.now()
        # batch_size is accepted for API compatibility but insert_df_pg loads in a single batch
        cls.insert_df_pg(df=df, tbl_name=tbl_name, cursor=cursor, conn=conn)
rptg_tend = datetime.now()
tdelta = rptg_tend - rptg_tstart
tdelta = tdelta.total_seconds() / 60
        print(Fore.RED + f"Runtime: {tdelta} min")
@classmethod
def sequential_load_pg(cls,
override,
tgt_tbl,
conn,
dt_start,
dt_end,
saved_day_id_range_placeholder,
dt1_interval,
dt2_interval,
sql_loop_fn,
sql_loop_fn_type,
filter_day_id_field1=False,
sql_loop_fn_dt_placeholder1=False,
filter_day_id_field2=False,
filter_id_type2=False,
sql_loop_fn_dt_placeholder2=False,
filter_day_id_field3=False,
filter_id_type3=False,
sql_loop_fn_dt_placeholder3=False,
loop_src1=False,
loop_src2=False,
loop_src3=False,
log_dir=False):
"""
        Loads a target table in sequential date intervals (e.g. month by month), resuming from
        the latest date already saved in the table and falling back to the second interval
        (e.g. weekly) if an interval-level insert fails.
        :param override: If True, drop and recreate the target table before loading.
        :param tgt_tbl: Target Postgres table to load.
        :param conn: Postgres database connection object.
        :param dt_start: Start date of the overall load window.
        :param dt_end: End date of the overall load window.
        :param saved_day_id_range_placeholder: Placeholder in the SQL template replaced with a date-range logging column.
        :param dt1_interval: Pandas frequency string for the primary (e.g. monthly) intervals.
        :param dt2_interval: Pandas frequency string for the fallback (e.g. weekly) intervals.
        :param sql_loop_fn: SQL template string, or function returning SQL, executed for each interval.
        :param sql_loop_fn_type: "fn" if sql_loop_fn is a function; otherwise it is treated as a template string.
        :param filter_day_id_field1: First date field used to filter each interval.
        :param sql_loop_fn_dt_placeholder1: Placeholder replaced with the first date filter.
        :param filter_day_id_field2: Optional second date field to filter on.
        :param filter_id_type2: Filter type for the second date field ("range" or "<").
        :param sql_loop_fn_dt_placeholder2: Placeholder replaced with the second date filter.
        :param filter_day_id_field3: Optional third date field to filter on.
        :param filter_id_type3: Filter type for the third date field ("range" or "<").
        :param sql_loop_fn_dt_placeholder3: Placeholder replaced with the third date filter.
        :param loop_src1: First source table passed to sql_loop_fn when it is a function.
        :param loop_src2: Second source table passed to sql_loop_fn when it is a function.
        :param loop_src3: Third source table passed to sql_loop_fn when it is a function.
        :param log_dir: Directory where the generated SQL for each interval is written.
        :return: None. The target table is created/loaded interval by interval.
"""
# define the month startend dates to loop through
rptg_dates = pd.date_range(dt_start, dt_end, freq=dt1_interval) - pd.offsets.MonthBegin(1)
rptg_dates = [str(x)[:10] for x in rptg_dates.to_list()]
rptg_dates = pd.DataFrame({
"start_date": rptg_dates,
"end_date": rptg_dates
})
rptg_dates['end_date'] = rptg_dates['end_date'].shift(-1)
rptg_dates = rptg_dates[pd.to_datetime(rptg_dates['start_date']) <= datetime.now()].dropna()
# define the weekly start/end dates to loop thru
rptg_dates_wk = pd.date_range(dt_start, dt_end, freq=dt2_interval)
rptg_dates_wk = [str(x)[:10] for x in rptg_dates_wk.to_list()]
rptg_dates_wk = pd.DataFrame({
"start_date": rptg_dates_wk,
"end_date": rptg_dates_wk
})
rptg_dates_wk['end_date'] = rptg_dates_wk['end_date'].shift(-1)
rptg_dates_wk = rptg_dates_wk[pd.to_datetime(rptg_dates_wk['start_date']) <= datetime.now()].dropna()
# dropping table if override = True
if override:
print(f'''table override True: Dropping table: {tgt_tbl} ''')
try:
cls.run_query_pg(conn=conn, sql=f'''drop table {tgt_tbl}''')
except:
conn.commit()
pass
# getting max day id value
try:
sql = f'''select max(date(trim(substring(dt_range,regexp_instr(dt_range,'to ')+3,10)))) as day_idnt FROM {tgt_tbl}'''
saved_dates = pd.read_sql_query(sql=sql, con=conn)
except:
conn.commit()
saved_dates = pd.DataFrame({"day_idnt": ["1999-12-31"]}) # arbitrarily old date
saved_date_dt = \
datetime(
year=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[0]),
month=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[1]),
day=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[2])
).replace(day=1).strftime("%Y-%m-%d")
rptg_dates = rptg_dates[
pd.to_datetime(rptg_dates['start_date']) >= \
pd.to_datetime(saved_date_dt)].reset_index(drop=True)
print("Starting load from:")
print(rptg_dates.head(1))
rptg_freq = "M"
for idx, row in rptg_dates.iterrows():
print(f'''{row['start_date']} to {row['end_date']}''')
# if idx == 0:
# break
if idx == 0 and saved_dates['day_idnt'][0] != pd.to_datetime(row['start_date']):
print(Fore.RED + f'''latest saved data date in table is {str(saved_dates['day_idnt'][0])} ...''')
# bump up start range:
new_start = str(pd.to_datetime(str(saved_dates['day_idnt'][0])) + pd.DateOffset(1))[:10]
print(Fore.RED + f'''revising start date to: {new_start} to {row['end_date']}''')
# if its a function, pass in params
if sql_loop_fn_type == "fn":
sql = sql_loop_fn(src=loop_src1,
src2=loop_src2,
src3=loop_src3,
start=new_start,
end=row['end_date'])
# otherwise, we will just replace strings
else:
# date range column for logging
sql = sql_loop_fn.replace(
saved_day_id_range_placeholder,
f" '{new_start} to {row['end_date']}' as dt_range,"
)
# date filters
sql = sql.replace(
sql_loop_fn_dt_placeholder1,
f" AND date({filter_day_id_field1}) >= '{new_start}' AND date({filter_day_id_field1}) < '{row['end_date']}'"
)
# check for other date fields
if sql_loop_fn_dt_placeholder2:
if filter_id_type2 == "range":
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) >= '{new_start}' AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
elif filter_id_type2 == "<":
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
if sql_loop_fn_dt_placeholder3:
                        if filter_id_type3 == "range":
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) >= '{new_start}' AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
elif filter_id_type3 == "<":
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
else:
if sql_loop_fn_type == "fn":
sql = sql_loop_fn(
start=row['start_date'],
end=row['end_date'],
src=loop_src1,
src2=loop_src2,
src3=loop_src3
)
else:
# date range column for logging
sql = sql_loop_fn.replace(
saved_day_id_range_placeholder,
f" '{row['start_date']} to {row['end_date']}' as dt_range,"
)
sql = sql.replace(
sql_loop_fn_dt_placeholder1,
f" AND date({filter_day_id_field1}) >= '{row['start_date']}' AND date({filter_day_id_field1}) < '{row['end_date']}'"
)
# check for other date fields
if sql_loop_fn_dt_placeholder2:
if filter_id_type2 == "range":
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) >= '{row['start_date']}' AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
elif filter_id_type2 == "<":
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
if sql_loop_fn_dt_placeholder3:
                        if filter_id_type3 == "range":
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) >= '{row['start_date']}' AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
elif filter_id_type3 == "<":
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
if idx == 0 and override:
sql_prefix = f"CREATE TABLE {tgt_tbl} AS "
else:
sql_prefix = f"INSERT INTO {tgt_tbl} "
Export.dump_sql(obj=sql_prefix + sql,
dir=log_dir + f"{tgt_tbl}_{idx}.sql")
try:
cls.run_query_pg(conn=conn, sql=sql_prefix + sql)
except Exception as e:
print(str(e))
rptg_freq = "W"
conn.commit()
break
# if the insert failed on a monthly level, cycle down to weekly level
if rptg_freq == "W":
print("Insert failed on monthly level...cycling down to weekly")
# getting max day id value
try:
sql = f'''select max(date(trim(substring(dt_range,regexp_instr(dt_range,'to ')+3,10)))) as day_idnt FROM {tgt_tbl}'''
saved_dates = pd.read_sql_query(sql=sql, con=conn)
except:
conn.commit()
saved_dates = pd.DataFrame({"day_idnt": ["1999-12-31"]}) # arbitrarily old date
saved_date_dt = \
datetime(
year=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[0]),
month=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[1]),
day=int(str(saved_dates['day_idnt'].astype(str).values[0]).split("-")[2])
).replace(day=1).strftime("%Y-%m-%d")
rptg_dates_wk = rptg_dates_wk[
pd.to_datetime(rptg_dates_wk['start_date']) >= \
pd.to_datetime(saved_date_dt)].reset_index(drop=True)
for idx, row in rptg_dates_wk.iterrows():
print(f'''{row['start_date']} to {row['end_date']}''')
if idx == 0 and saved_dates['day_idnt'][0] != pd.to_datetime(row['start_date']):
print(Fore.RED + f'''latest saved data date in table is {str(saved_dates['day_idnt'][0])} ...''')
# bump up start range:
new_start = str(pd.to_datetime(str(saved_dates['day_idnt'][0])) + pd.DateOffset(1))[:10]
print(Fore.RED + f'''revising start date to: {new_start} to {row['end_date']}''')
if sql_loop_fn_type == "fn":
sql = sql_loop_fn(src=loop_src1,
src2=loop_src2,
src3=loop_src3,
start=new_start,
end=row['end_date'])
else:
# date range column for logging
sql = sql_loop_fn.replace(
saved_day_id_range_placeholder,
f" '{new_start} to {row['end_date']}' as dt_range,"
)
sql = sql.replace(
sql_loop_fn_dt_placeholder1,
f" AND date({filter_day_id_field1}) >= '{new_start}' AND date({filter_day_id_field1}) < '{row['end_date']}'"
)
# check for other date fields
if sql_loop_fn_dt_placeholder2:
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) >= '{new_start}' AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
if sql_loop_fn_dt_placeholder3:
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) >= '{new_start}' AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
else:
if sql_loop_fn_type == "fn":
sql = sql_loop_fn(
start=row['start_date'],
end=row['end_date'],
src=loop_src1,
src2=loop_src2,
src3=loop_src3
)
else:
# date range column for logging
sql = sql_loop_fn.replace(
saved_day_id_range_placeholder,
f" '{row['start_date']} to {row['end_date']}' as dt_range,"
)
sql = sql.replace(
sql_loop_fn_dt_placeholder1,
f" AND date({filter_day_id_field1}) >= '{row['start_date']}' AND date({filter_day_id_field1}) < '{row['end_date']}'"
)
# check for other date fields
if sql_loop_fn_dt_placeholder2:
sql = sql.replace(
sql_loop_fn_dt_placeholder2,
f" AND date({filter_day_id_field2}) >= '{row['start_date']}' AND date({filter_day_id_field2}) < '{row['end_date']}'"
)
if sql_loop_fn_dt_placeholder3:
sql = sql.replace(
sql_loop_fn_dt_placeholder3,
f" AND date({filter_day_id_field3}) >= '{row['start_date']}' AND date({filter_day_id_field3}) < '{row['end_date']}'"
)
if idx == 0 and override:
sql_prefix = f"CREATE TABLE {tgt_tbl} AS "
else:
sql_prefix = f"INSERT INTO {tgt_tbl} "
Export.dump_sql(obj=sql_prefix + sql,
dir=log_dir + f"{tgt_tbl}_{idx}.sql")
cls.run_query_pg(conn=conn, sql=sql_prefix + sql)
@classmethod
def sequential_load_pg_wk(cls,
rptg_dates,
override,
tgt_tbl,
conn,
rptg_wk,
rptg_wk_start,
rptg_wk_end,
sql_loop_fn,
# filter dates set 1
filter_dt_field1=False,
filter_dt_type1=False,
filter_dt_placeholder1=False,
# filter dates set 2
filter_dt_field2=False,
filter_dt_type2=False,
filter_dt_placeholder2=False,
# filter dates set 3
filter_dt_field3=False,
filter_dt_type3=False,
filter_dt_placeholder3=False,
log_dir=False
):
"""
        Loads a target table week by week from a pre-built dataframe of reporting-week
        start/end dates, substituting the week identifier and date filters into a SQL template.
        :param rptg_dates: DataFrame with 'rptg_wk', 'start_date' and 'end_date' columns, one row per reporting week.
        :param override: If True, drop and recreate the target table before loading.
        :param tgt_tbl: Target Postgres table to load.
        :param conn: Postgres database connection object.
        :param rptg_wk: Placeholder in the SQL template replaced with the reporting week column.
        :param rptg_wk_start: Placeholder replaced with the reporting week start date column.
        :param rptg_wk_end: Placeholder replaced with the reporting week end date column.
        :param sql_loop_fn: SQL template string executed for each reporting week.
        :param filter_dt_field1: First date field used to filter each week.
        :param filter_dt_type1: Filter type for the first date field (not currently used; the first filter is always a range).
        :param filter_dt_placeholder1: Placeholder replaced with the first date filter.
        :param filter_dt_field2: Optional second date field to filter on.
        :param filter_dt_type2: Filter type for the second date field ("range" or "<=").
        :param filter_dt_placeholder2: Placeholder replaced with the second date filter.
        :param filter_dt_field3: Optional third date field to filter on.
        :param filter_dt_type3: Filter type for the third date field ("range" or "<=").
        :param filter_dt_placeholder3: Placeholder replaced with the third date filter.
        :param log_dir: Directory where the generated SQL for each week is written.
        :return: None. The target table is created/loaded week by week.
"""
# dropping table if override = True
if override:
print(f'''table override True: Dropping table: {tgt_tbl} ''')
try:
cls.run_query_pg(conn=conn, sql=f'''drop table {tgt_tbl}''')
except:
conn.commit()
pass
for idx, row in rptg_dates.iterrows():
print(f'''{row['start_date']} to {row['end_date']}''')
# date range column for logging
sql = sql_loop_fn.replace(
rptg_wk,
f" '{row['rptg_wk']}' as rptg_wk,"
)
sql = sql.replace(
rptg_wk_start,
f" '{row['start_date']}' as rptg_wk_start,"
)
sql = sql.replace(
rptg_wk_end,
f" '{row['end_date']}' as rptg_wk_end,"
)
# date filters
sql = sql.replace(
filter_dt_placeholder1,
f" AND date({filter_dt_field1}) > '{row['start_date']}' "
f" AND date({filter_dt_field1}) <= '{row['end_date']}'"
)
# check for other date fields
if filter_dt_placeholder2:
if filter_dt_type2 == "range":
sql = sql.replace(
filter_dt_placeholder2,
f" AND date({filter_dt_field2}) > '{row['start_date']}' "
f" AND date({filter_dt_field2}) <= '{row['end_date']}'"
)
elif filter_dt_type2 == "<=":
sql = sql.replace(
filter_dt_placeholder2,
f" AND date({filter_dt_field2}) <= '{row['end_date']}'"
)
if filter_dt_placeholder3:
if filter_dt_type3 == "range":
sql = sql.replace(
filter_dt_placeholder3,
f" AND date({filter_dt_field3}) > '{row['start_date']}' "
f" AND date({filter_dt_field3}) <= '{row['end_date']}'"
)
elif filter_dt_type3 == "<=":
sql = sql.replace(
filter_dt_placeholder3,
f" AND date({filter_dt_field3}) <= '{row['end_date']}'"
)
if idx == 0 and override:
sql_prefix = f"CREATE TABLE {tgt_tbl} AS "
else:
sql_prefix = f"INSERT INTO {tgt_tbl} "
Export.dump_sql(obj=sql_prefix + sql,
dir=log_dir + f"{tgt_tbl}_{idx}.sql")
try:
cls.run_query_pg(conn=conn, sql=sql_prefix + sql)
except Exception as e:
print(str(e))
conn.commit()
break
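# Illustrative SQL template for the placeholder-driven loader above (all names and placeholder
# tokens below are hypothetical). sequential_load_pg_wk swaps the tokens for the reporting-week
# columns and date filters on every loop iteration:
#
#   sql_template = """
#       SELECT --RPTG_WK-- --RPTG_WK_START-- --RPTG_WK_END--
#              o.order_id, o.amount
#       FROM orders o
#       WHERE 1 = 1 --DT_FILTER1--
#   """
#   Postgres.sequential_load_pg_wk(
#       rptg_dates=rptg_dates_df,        # needs 'rptg_wk', 'start_date', 'end_date' columns
#       override=True, tgt_tbl="orders_wk", conn=conn,
#       rptg_wk="--RPTG_WK--", rptg_wk_start="--RPTG_WK_START--", rptg_wk_end="--RPTG_WK_END--",
#       sql_loop_fn=sql_template,
#       filter_dt_field1="o.order_dt", filter_dt_type1="range", filter_dt_placeholder1="--DT_FILTER1--",
#       log_dir="./logs/")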
class Redshift:
"""
Generic functions for Redshift SQL queries and ETL.
.. image:: ../images_source/db_etl_tools/redshift1.png
"""
@classmethod
def run_query_rs(cls, conn, sql):
"""
Executes a SQL statement with a Redshift database connection.
:param conn: Redshift database connection object,
:param sql: SQL Statement to execute.
:return: Elapsed time to execute query.
"""
rptg_tstart = datetime.now()
cur = conn.cursor()
cur.execute(sql)
conn.commit()
rptg_tend = datetime.now()
tdelta = rptg_tend - rptg_tstart
tdelta = tdelta.total_seconds() / 60
        print(Fore.RED + f"Runtime: {tdelta} min")
@classmethod
def insert_val_rs(cls, col_list, val_list, tbl_name):
"""
Creates SQL to run an INSERT operation of a given Redshift table.
:param col_list: List of columns to INSERT or UPDATE.
:param val_list: List of values to INSERT or UPDATE.
        :param tbl_name: Name of Redshift table.
:return: SQL to run an INSERT statement.
"""
sql = f'''
INSERT INTO {tbl_name}
(
{str(col_list).replace("[", "").replace("]", "").replace("'", "")}
) values (
{str(val_list).replace("[", "").replace("]", "")}
)
'''
return sql
@classmethod
def upsert_tbl_rs(cls, src_tbl, tgt_tbl, src_join_cols, src_insert_cols,
src_update_cols=False, update_compare_cols=False):
"""
Creates SQL to run an UPSERT (INSERT new records or UPDATE existing records) operation of a given Redshift table.
:param src_tbl: Redshift source table that contains data to be merged from.
:param tgt_tbl: Redshift target table to receive UPSERT operation.
:param src_join_cols: Columns to use to join source and target tables.
:param src_insert_cols: Columns to be inserted from source table.
:param src_update_cols: Columns to be updated from source table.
:param update_compare_cols: Columns to use to compare values across source and target tables.
:return: A SQL Insert statement and a SQL Update statement.
"""
src_join_cols_ = (
str([f"t.{c} = s.{c} AND "
for c in src_join_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
.replace(",", "")
)
src_join_cols_ = src_join_cols_[:src_join_cols_.rfind("AND")]
src_join_cols_f = (
str([f"t.{c} IS NULL AND "
for c in src_join_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
.replace(",", "")
)
src_join_cols_f = src_join_cols_f[:src_join_cols_f.rfind("AND")]
src_insert_cols_ = (
str([f"s.{c}"
for c in src_insert_cols])
.replace("[", "")
.replace("]", "")
.replace("'", "")
)
if src_update_cols:
src_update_cols_ = (
str([f"{c} = s.{c},"
for c in src_update_cols])
.replace("[", "")
.replace("]", "")
.replace("', '", "")
.replace("'", "")
)
src_update_cols_ = src_update_cols_[:src_update_cols_.rfind(",")]
# update join statement
src_join_cols2_ = src_join_cols_.replace("t.", f"{tgt_tbl}.")
if update_compare_cols:
update_compare_cols_ = (
str([f"s.{c} != {tgt_tbl}.{c},"
for c in update_compare_cols])
.replace("[", "")
.replace("]", "")
.replace("', '", "")
.replace("'", "")
)
update_compare_cols_ = update_compare_cols_[:update_compare_cols_.rfind(",")]
src_join_cols2_ = src_join_cols2_ + " AND " + update_compare_cols_
# src_join_cols2_ = src_join_cols2_.replace("t.", f"{tgt_tbl}.")
# https://dwgeek.com/amazon-redshift-merge-statement-alternative-and-example.html/
sql_update = f'''
/* Update records*/
UPDATE {tgt_tbl}
SET {src_update_cols_}
FROM {src_tbl} s
WHERE {src_join_cols2_}
'''.replace("\n", " ")
else:
sql_update = ""
sql_insert = f'''
/* Insert records*/
INSERT INTO {tgt_tbl}
SELECT {src_insert_cols_}
FROM {src_tbl} s
LEFT JOIN {tgt_tbl} t
ON {src_join_cols_}
WHERE {src_join_cols_f}
'''.replace("\n", " ")
return sql_update, sql_insert
@classmethod
def make_df_tbl_rs(cls, tbl_name, df):
"""
Creates SQL to run a CREATE TABLE statement based on a Pandas DataFrame.
:param tbl_name: Redshift table name.
:param df: Pandas DataFrame.
:return: CREATE TABLE SQL statement.
"""
# fix columns
df = Generic.make_db_cols(df)
# loop thru the columns
for idx, col in enumerate(df):
# find the max length of each field
col_desc = col + "-" + str(df[col].map(lambda x: len(str(x))).max())
# find the max value of each fields
try:
col_max = col + "-" + str(max(df[col]))
except:
col_max = col + "-" + 'NA'
if idx == 0:
col_desc_all = [col_desc]
col_max_all = [col_max]
else:
col_desc_all.append(col_desc)
col_max_all.append(col_max)
# make df of column lengths
        col_desc_all = pd.DataFrame(col_desc_all)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 13:23:20 2022
@author: lawashburn
"""
import os
import csv
import pandas as pd
import numpy as np
from datetime import datetime
now = datetime.now()
spectra_import = r"C:\Users\lawashburn\Documents\HyPep1.0\HyPep_Simple_ASMS_Results\Raw_Files\Formatted_MS2\PO_3_untarget_ms2_output_list.csv"#path to spectra after RawConverter
ion_list_import = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\ion_list.csv"
precursor_list_import = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\precursor_list.csv"
working_directory = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\num_test"
final_dir =r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\Final_results"
data_type = 'PO_'
trial = '3_'
sample_name = 'PO 3'
error_marg = 10 #+/- ppm
h_mass = 1.00784
#spectra_import = input('Enter path to formatted spectra .txt file: ')
#ion_list_import = input('Enter path to ion fragment list .csv: ')
#precursor_list_import = input('Enter path to precursor mass .csv: ')
#working_directory = input('Enter path to working directory: ')
#final_dir = input('Enter path to output directory: ')
#data_type = input('Enter tissue type: ')
#trial = input('Enter trial number: ')
#sample_name = input('Enter sample name (e.g. TG2')
#error_marg = input('Enter ppm error cutoff: ')
print('loading files', datetime.now())
#formats spectra import values
spectra_import = pd.read_csv(spectra_import, sep=",",skiprows=[0], names= ["m/z", "resolution", "charge", "intensity","MS2",'scan_number','empty'])
spectra_value = pd.DataFrame()
import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames =[]
for game_file in game_files:
game_frame = pd.read_csv(game_file, names =['type', 'multi2', 'multi3', 'multi4','multi5', 'multi6', 'event'])
game_frames.append(game_frame)
# print(type(game_frames))
games = pd.concat(game_frames)
""" Classes for retrieving and removing data column by column. """
# from copy import deepcopy
import numbers
# import inspect
import itertools
import pandas
# from alignmentrs.utils import add_to_history
__all__ = ['ColData']
class ColMethods:
def __init__(self, instance):
self._instance = instance
self._axis = 1
@property
def metadata(self):
"""pandas.core.DataFrame: Returns the column metadata of the alignment.
"""
return self._instance.col_metadata
@metadata.setter
    def metadata(self, metadata):
        """Sets the column metadata of the alignment."""
        if not isinstance(metadata, pandas.DataFrame):
            metadata = pandas.DataFrame(metadata)
        self._instance.col_metadata = metadata
@property
def index(self):
"""pandas.indexes.base.Index: Returns the column index of
the alignment.
"""
return self._instance.col_metadata.index
@index.setter
    def index(self, index):
"""Sets the column index of the alignment."""
if not isinstance(index, pandas.Index):
index = pandas.Index(index)
self._instance.col_metadata.index = index
def get(self, positions, **kwargs):
"""Returns one or more columns from the alignment as a new alignment.
Parameters
----------
positions : int or iterable
Position index/indices of columns to return.
Returns
-------
Alignment
Returns the subset of the alignment containing only
the specified columns. This returns a copy of the
original alignment.
"""
# Check input
if isinstance(positions, int):
positions = [positions]
elif isinstance(positions, list) and \
sum((isinstance(pos, int) for pos in positions)) == len(positions):
pass
else:
raise TypeError('positions must be an int or a list of int')
aln = self._instance
return aln.col.retain(positions, copy=True)
def remove(self, positions, copy=False, **kwargs):
"""Removes the specified column/s from the alignment.
Parameters
----------
positions : int or iterable
Position index/indices of columns to remove.
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after removing the specified
columns.
"""
# Check input
if isinstance(positions, int):
positions = [positions]
elif isinstance(positions, list) and \
len(positions) == 0:
pass
elif isinstance(positions, list) and \
sum((isinstance(pos, int) for pos in positions)) == len(positions):
pass
else:
raise TypeError('positions must be an int or a list of int')
aln = self._instance
if copy is True:
aln = self._instance.copy()
# Remove columns from SeqMatrix
aln.data.remove_cols(positions)
# Remove column metadata
indices = aln.column_metadata.index[positions]
aln.column_metadata.drop(indices, axis=0, inplace=True)
# # Add to history
# add_to_history(
# self._instance, '.col.remove', positions,
# copy=copy,
# **kwargs
# )
if copy is True:
return aln
def retain(self, positions, copy=False, **kwargs):
"""Retains the specified column/s in the alignment. Removes all the
other columns.
Parameters
----------
positions : int or iterable
Position index/indices of columns to be retained.
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after removing columns.
"""
# Check input
if isinstance(positions, int):
positions = [positions]
elif isinstance(positions, list) and \
sum((isinstance(pos, int) for pos in positions)) == len(positions):
pass
else:
raise TypeError('positions must be an int or a list of int')
aln = self._instance
if copy is True:
aln = self._instance.copy()
aln.data.retain_cols(positions)
aln.column_metadata = aln.column_metadata.iloc[positions]
# # Add to history
# add_to_history(
# self._instance, '.col.retain', positions,
# copy=copy,
# **kwargs
# )
if copy is True:
return aln
def reorder(self, position_list, copy=False, **kwargs):
"""Reorders columns according the specified list of positions.
Parameters
----------
position_list : list of int
Ordered list of position indices indicating the new order
of columns.
copy : bool, optional
Whether to return a new copy of the reordered alignment, keeping the
original intact, or reorder the alignment inplace. (default is
False, reordering is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after reordering
columns.
"""
# Check input
        if isinstance(position_list, list) and \
                sum((isinstance(pos, int) for pos in position_list)) == len(position_list):
            if len(position_list) != self._instance.ncols:
                raise TypeError('length of position list must be equal to the '
                                'number of columns in the alignment: {} != {}'.format(
                                    len(position_list), self._instance.ncols
                                ))
else:
raise TypeError('position list must be a list of int')
aln = self._instance
if copy is True:
aln = self._instance.copy()
aln.data.reorder_cols(position_list)
aln.column_metadata = aln.column_metadata.iloc[position_list]
# # Add to history
# add_to_history(
# aln, '.col.reorder', positions,
# copy=copy,
# **kwargs
# )
if copy is True:
return aln
def filter(self, function, copy=False, dry_run=False, inverse=False,
chunk_size=1, **kwargs):
"""Returns the list of column positions where the given function
is True.
Parameters
----------
function : callable
Function used to evaluate each column. The function should expect a list of list of str as input and return a bool as output.
copy : bool, optional
Whether to return a new copy of the filtered alignment, keeping the
original intact, or filter the alignment inplace.
(default is False, filtering is performed inplace)
dry_run : bool, optional
If True, evaluates the function and returns the list of True and False column position only. Nothing is edited. Otherwise,
column positions that evaluated False are removed from the
alignment. (the default is False, the alignment is edited)
inverse : bool, optional
If True, columns that evaluate True are removed. (default is False, column positions that evaluate False are removed)
chunk_size : int, optional
Number of characters to group as one column. (default is 1)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after removing columns that evaluated False.
dict
When `dry_run` is True, return a dictionary where True and False
are keys, and the list of respective column positions are the
values.
"""
# Check input
# Checks if function is callable
# Function accepts a list of str, outputs true or false
if not(function is not None and callable(function)):
raise TypeError('missing filter function')
# Checks if chunk_size value is valid
if chunk_size < 1:
raise ValueError('chunk_size value must be greater than zero')
# Check optional kwargs
custom_title = 'Filter'
if 'custom_title' in kwargs.keys():
custom_title = kwargs['custom_title']
custom_class_true = True
if 'custom_class_true' in kwargs.keys():
custom_class_true = kwargs['custom_class_true'] + ' (True)'
custom_class_false = False
if 'custom_class_false' in kwargs.keys():
custom_class_false = kwargs['custom_class_false'] + ' (False)'
aln = self._instance
if copy is True:
aln = self._instance.copy()
# Get the list of positions based on the result of the
# filtering function. Only columns that are True are recorded.
# There are 2 different ways to create the positions list depending on
# whether the chunk_size is 1 (default) or greater than 1.
positions = [
i for i, col in enumerate(aln.col.iter(chunk_size=chunk_size))
if function(col)
]
# Positions are adjusted if chunk_size is greater than 1
# For example, if chunk_size = 3 and positions = [0,1,3]
# The positions value is converted back to nucleotide positions
# [0, 1, 2, 3, 4, 5, 9, 10, 11]
if chunk_size > 1:
positions = list(itertools.chain(
*[range(i*chunk_size, (i*chunk_size)+chunk_size)
for i in positions]))
# Generate list of column ids that are "opposite" of what was given
other_positions = aln.data.invert_cols(positions)
# "dry_run" shows the columns that will are True or False based
# on the given filtering function.
# This prints out the number of columns in the True or False categories
# and returns the lists of column ids that are classified as True or
# False.
if dry_run:
parts = []
parts.append('[{}]'.format(custom_title))
parts.append('{} = {}/{}'.format(
custom_class_true, len(positions), aln.ncols))
parts.append('{} = {}/{}'.format(
custom_class_false, len(other_positions), aln.ncols))
print('\n'.join(parts))
return {
True: positions,
False: other_positions
}
# By default, the filter method will keep columns that are
# True according to the filter function, and will remove
# columns that are False.
# However, if `inverse` is True, the filter method will do the
# opposite. It will keep columns that are False and will remove
# columsn that are True
if inverse:
aln.col.remove(positions, _record_history=False)
else:
aln.col.retain(positions, _record_history=False)
# # Add to history
# func_sig = function.__qualname__ + \
# repr(inspect.signature(function)) \
# .lstrip('<Signature ').rstrip('>')
# add_to_history(
# aln, '.col.filter', func_sig,
# copy=copy,
# dry_run=dry_run,
# inverse=inverse,
# **kwargs
# )
if copy is True:
return aln
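    # A small hypothetical example of filter(): keep only gap-free columns, or preview the
    # split with dry_run (aln is assumed to be an existing Alignment object):
    #
    #   aln.col.filter(lambda col: '-' not in col)                 # drop gapped columns inplace
    #   aln.col.filter(lambda col: '-' not in col, dry_run=True)   # {True: [...], False: [...]}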
def map(self, function, step=None, chunk_size=None):
"""Maps a function to the sequence matrix column-wise.
Parameters
----------
function : callable
Function to be mapped. The function should expect a list of str as input.
step : int, optional
Number of characters to skip. (default is None)
chunk_size : int, optional
Number of characters to group as one column. (default is None)
Yields
-------
object
Each column is evaluated using the given function
"""
for col in self.iter(step=step, chunk_size=chunk_size, lazy=True):
yield function(col)
def iter(self, step=None, chunk_size=None, lazy=False):
"""Iterates over the sequence matrix column-wise.
Returns a list of list of str, the inner list representing a column.
Parameters
----------
step : int, optional
Number of characters to skip. (default is None)
chunk_size : int, optional
Number of characters to group as one column. (default is None)
lazy : bool, optional
If True, uses lazy execution (saves memory), otherwise uses eager execution. (default is False, uses eager execution)
Yields
-------
list of str
Each column is represented as a list of str whose order is based
on the ordering of samples in the alignment.
"""
# Check input type
if step is not None:
if not isinstance(step, int):
raise ValueError(
'`step` must be None or int: {} ({})'.format(
step, type(step)
))
if chunk_size is not None:
if not isinstance(chunk_size, int):
raise ValueError(
'`chunk_size` must be None or int: {} ({})'.format(
chunk_size, type(chunk_size)
))
if (step and chunk_size) and (step > 1 and chunk_size > 1):
raise ValueError(
'`step` and `chunk_size` cannot be used simultaneously')
# Initialize values
# Default for step and chunk_size is None
# The values are changed depending on how step and chunk_size are
# set by the user.
# Note that both step and chunk_size cannot be set simultaneously by
# the user.
# step is not set, chunk_size is set
if step is None:
if chunk_size is None:
# If both step and chunk_size are None, then step is 1
step = 1
else:
# If step is None but chunk_size is specified, step
# adopts the value of chunk_size to get consecutive
# columns.
step = chunk_size
# chunk_size is not set
if chunk_size is None:
chunk_size = 1
# Check step and chunk_size values
if step < 1:
raise ValueError('`step` must be greater than zero: {}'.format(
step
))
if chunk_size < 1:
raise ValueError(
'`chunk_size` must be greater than zero: {}'.format(
chunk_size
))
# Initialize other values
cnt = 0
col_range = range(0, self._instance.ncols - (chunk_size-1), step)
# iter method offers two ways to iterate: lazy and eager
# In lazy execution, the function uses yield to return a copy of
# the current column. Either get_chunk or get_col is used depending
# on whether the chunk_size is specified or not
if lazy:
for i in col_range:
if chunk_size == 1:
yield self._instance.data.get_col(i)
else:
yield self._instance.data.get_chunk(i, chunk_size)
# In eager execution, the function transforms the sequence matrix
# into a list of list of str column-wise using get_chunks or get_cols
# depending on whether the chunk_size is specified or not.
# Then the list of list of str is iterated, using yield to return
# each column (list of str) one by one.
else:
indices = list(col_range)
if chunk_size == 1:
for col in self._instance.data.get_cols(indices):
yield col
else:
for col in self._instance.data.get_chunks(indices, chunk_size):
yield col
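    # Usage sketch for iter()/map() (aln is a hypothetical Alignment): iterate columns singly,
    # in codon-sized chunks, or map a summary function over them.
    #
    #   cols = list(aln.col.iter())                              # one list of str per column
    #   codons = list(aln.col.iter(chunk_size=3))                # non-overlapping 3-char chunks
    #   gap_counts = list(aln.col.map(lambda c: c.count('-')))   # gaps per column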
def reset_index(self, copy=False, drop=False, **kwargs):
"""Resets the column index to the default integer index.
Parameters
----------
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
drop : bool, optional
If True, do not try to insert the original index into dataframe
columns. (default is False, the original index is inserted as a
column named `index`)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after removing columns that evaluated False. Note that this returns the whole
Alignment object and not only the pandas DataFrame containing
column metadata.
"""
aln = self._instance
if copy is True:
aln = self._instance.copy()
aln.column_metadata.reset_index(drop=drop, inplace=True)
# # Add to history
# add_to_history(
# aln, '.col.reset_index',
# copy=copy,
# **kwargs
# )
if copy is True:
return aln
def add_metadata(self, metadata, name=None, copy=False, **kwargs):
"""Adds a new category to the column metadata. This adds a column
to the column metadata DataFrame.
Parameters
----------
metadata : list, dict, pandas.Series or pandas.DataFrame
Metadata to be added.
name : str, optional
Name of the new metadata category.
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after adding new column metadata categories.
"""
raise NotImplementedError()
def remove_metadata(self, name, copy=False, **kwargs):
"""Removes one or more categories from the column metadata. This removes
columns from the column metadata DataFrame.
Parameters
----------
name : str or list of str
Name/s of the new metadata categories.
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after removing column metadata categories.
"""
raise NotImplementedError()
def replace_metadata(self, name, metadata, copy=False):
"""Replaces metadata in the given column metadata category.
Parameters
----------
name : str
Name of the metadata category. This is also the column name
in the underlying column metadata DataFrame.
metadata: list
List of metadata to replace existing information.
copy : bool, optional
Whether to return a new copy of the edited alignment, keeping the
original intact, or edit the alignment inplace. (default is False,
editing is done inplace)
Returns
-------
Alignment
When `copy` is True, returns the edited alignment after replacing
the metadata in the specified column metadata category.
"""
raise NotImplementedError()
@staticmethod
def _insert_metadata(aln, position, column_values):
if isinstance(column_values, list) and \
sum((isinstance(val, list) for val in column_values)):
df = pandas.DataFrame(
{k:v for k,v in zip(aln.column_metadata, column_values)})
elif isinstance(column_values, list) and \
sum((isinstance(val, dict) for val in column_values)):
df = pandas.DataFrame(column_values)
elif isinstance(column_values, list) and \
sum((isinstance(val, numbers.Number) or isinstance(val, str)
for val in column_values)):
df = pandas.DataFrame(
{k:v for k,v in zip(aln.column_metadata, column_values)})
elif isinstance(column_values, dict) and \
sum((isinstance(val, list) for val in column_values)):
df = pandas.DataFrame(column_values)
elif isinstance(column_values, dict) and \
sum((isinstance(val, numbers.Number) or isinstance(val, str)
for val in column_values)):
            df = pandas.DataFrame(column_values)
from scipy.optimize import leastsq, curve_fit, minimize, OptimizeResult
import matplotlib
from matplotlib import axes
import matplotlib.pyplot as plt
import numpy as np
import math
from typing import Callable
import datetime
import pandas as pd
from io import StringIO
from numpy import mean, std, median
def f_logistic(x:np.ndarray, A, B, C, D) -> np.ndarray:
return (A - D)/(1 + (x/C)**B) + D
def loss_logistic(p, y, x):
A, B, C, D = p
return np.sum((y - f_logistic(x, A, B, C, D))**2)
def f_gompertz(x:np.ndarray, A, B, C, D) -> np.ndarray:
# return D + A * (np.exp(np.exp(B * (C * x))))
return D + C * np.exp(-B * np.exp(-x / A))
def loss_gompertz(p, y, x):
A, B, C, D = p
return np.sum((y - f_gompertz(x, A, B, C, D))**2)
def fit(x:np.ndarray, y:np.ndarray, lossFunc:Callable) -> OptimizeResult:
"""Tries to fit x and y data to using given loss function.
loss function itself contains the function to be fit
Args:
x (np.ndarray): x data
y (np.ndarray): y data
lossFunc (function): loss function
Returns:
OptimizeResult: scipy OptimizeResult object. Member x is numpy.ndarray
which contains the optimization solution.
"""
idx = (~np.isnan(x+y)) # eliminate missing data points
x = x[idx]
y = y[idx]
A0 = y.max() * 2
D0 = y.min() / 2
C0 = x.mean() * 2
B0 = 1
p0 = [A0, B0, C0, D0] # starting values to begin optimization.
r = minimize(lossFunc, x0=p0, args=(y, x), method='CG')
return r
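# Minimal sketch of fitting synthetic growth data with the logistic loss defined above
# (parameter values are made up for illustration):
#
#   x = np.linspace(0, 48, 20)                       # e.g. hours
#   y = f_logistic(x, A=0.1, B=-4, C=24, D=1.2)      # pretend measurements
#   r = fit(x, y, loss_logistic)
#   A, B, C, D = r.x                                 # fitted parameters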
def plotfunc(xrange:tuple, f:Callable, r:OptimizeResult, axs:matplotlib.axes) -> tuple:
xp = np.linspace(xrange[0], xrange[1], 100)
yp = f(xp, *r.x)
axs.plot(xp, yp)
return xp, yp
def plotdata(x:np.ndarray, y:np.ndarray, axs:matplotlib.axes, xrange=(-1,-1)) -> tuple:
idx = (~np.isnan(x+y)) # eliminate missing data points
x = x[idx]
y = y[idx]
xmin = xrange[0]
xmax = xrange[1]
if xmin == -1:
xmin = x.min()
if xmax == -1:
xmax = x.max()
x, y = x[x >= xmin], y[x >= xmin]
x, y = x[x <= xmax], y[x <= xmax]
axs.scatter(x, y)
return np.array(x), np.array(y)
def doublingt(xrange:tuple, f:Callable, r:OptimizeResult, axs:matplotlib.axes) -> tuple:
"""Plots doubling time chart in semi-log scale (log y - linear x axis).
Returns x and y lists in a tuple.
Time point for minimum doubling time can be retrieved by:
dx[dy.argmin()]
Args:
xrange (tuple): (xmin, xmax)
f (Callable): fit function
r (OptimizeResult): optimization results
axs (matplotlib.axes): the axis for the plot
Returns:
tuple: x and y as lists
"""
xp = np.linspace(xrange[0], xrange[1], 100)
yp = f(xp, *r.x)
dy = []
for i in range(0, len(xp)-1):
dx = xp[i+1] - xp[i]
_dy = math.log(2) * dx / (math.log(yp[i+1]) - math.log(yp[i]))
dy.append(_dy)
axs.set_yscale('log')
axs.minorticks_on()
axs.yaxis.set_minor_locator(plt.MaxNLocator(4))
axs.grid(b=True, which='major', color='#666666', linestyle='-')
axs.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axs.plot(xp[:-1], dy, c='g')
return np.array(xp[:-1]), np.array(dy)
def timestr(s:str, century='20') -> datetime:
"""Converts YYMMDD-hhmm to datetime.datetime object.
century is set to 20 as default
Args:
s (str): date signature string YYMMDDhhmm
century (str): 2-digit century string. 20 as default
Returns:
datetime: datetime.datetime object
"""
return datetime.datetime(
int('{0}{1}'.format(century, s[0:2])), # year
int(s[2:4]), # month
int(s[4:6]), # day
int(s[7:9]), # hr
int(s[9:11]) # min
)
def dt(t0:str, t1:str) -> int:
"""Delta t as minutes between t0 and t1 date-time strings
Args:
t0 (str): date-time string in YYMMDD-hhmm format
t1 (str): date-time string in YYMMDD-hhmm format
Returns:
int: delta t in minutes
"""
return (timestr(t1).timestamp() - timestr(t0).timestamp()) / 60
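# Quick sanity examples for the date helpers (hypothetical timestamps in YYMMDD-hhmm form):
#
#   timestr('220323-1330')             # -> datetime.datetime(2022, 3, 23, 13, 30)
#   dt('220323-1330', '220323-1430')   # -> 60.0 (minutes)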
def readPlates(fn:str) -> (pd.DataFrame, dict):
f = open(fn, 'r')
layout = []
plates = []
plateNames = []
tempFile = StringIO('')
line = f.readline()
header = ''
while line:
if '#' in line:
if header != '':
tempFile.flush()
tempFile.seek(0)
#print(header)
if header == 'layout':
                    df = pd.read_csv(tempFile, sep='\t', header=None)
import pickle
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as scp_stats
import pandas as pd
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
N_trials = 20
type_color = {'Scnn1a': 'darkorange', 'Rorb': 'red', 'Nr5a1': 'magenta', 'PV1': 'blue', 'PV2': 'cyan', 'Exp.E.': 'gray', 'Exp.I.': 'gray'}
type_order = ['Scnn1a', 'Rorb', 'Nr5a1', 'Exp.E.', 'PV1', 'PV2', 'Exp.I.']
#type_color = {'Scnn1a': 'darkorange', 'Rorb': 'red', 'Nr5a1': 'magenta', 'PV1': 'blue', 'PV2': 'cyan'}
#type_order = ['Scnn1a', 'Rorb', 'Nr5a1', 'PV1', 'PV2']
# Decide which systems we are doing analysis for.
sys_dict = {}
sys_dict['ll1'] = { 'cells_file': '../build/ll1.csv', 'f_1': '../simulations_ll1/spont/output_ll1_spont_', 'f_2': '_sd278/spk.dat', 'f_3': '_sd278/tot_f_rate.dat', 'f_out': 'spont_activity/ll1_spont.csv', 'types': [] }
sys_dict['ll2'] = { 'cells_file': '../build/ll2.csv', 'f_1': '../simulations_ll2/spont/output_ll2_spont_', 'f_2': '_sd278/spk.dat', 'f_3': '_sd278/tot_f_rate.dat', 'f_out': 'spont_activity/ll2_spont.csv', 'types': [] }
sys_dict['ll3'] = { 'cells_file': '../build/ll3.csv', 'f_1': '../simulations_ll3/spont/output_ll3_spont_', 'f_2': '_sd278/spk.dat', 'f_3': '_sd278/tot_f_rate.dat', 'f_out': 'spont_activity/ll3_spont.csv', 'types': [] }
#sys_dict['rr2'] = { 'cells_file': '../build/rr2.csv', 'f_1': '../output_rr2_spont_', 'f_2': '_sd282_cn0/spk.dat', 'f_3': '_sd282_cn0/tot_f_rate.dat', 'f_out': 'spont_activity/rr2_spont.csv', 'types': [] }
#sys_dict['ll1_LIF'] = { 'cells_file': '../build/ll1.csv', 'f_1': '/data/mat/ZiqiangW/simulation_ll_syn_data_lif_z102/simulation_ll1/output_ll1_spont_', 'f_2': '_sdlif_z101/spk.dat', 'f_3': '_sdlif_z101/tot_f_rate.dat', 'f_out': 'spont_activity_LIF/ll1_spont.csv', 'types': [] }
#sys_dict['ll2_LIF'] = { 'cells_file': '../build/ll2.csv', 'f_1': '/data/mat/ZiqiangW/simulation_ll_syn_data_lif_z102/simulation_ll2/output_ll2_spont_', 'f_2': '_sdlif_z101/spk.dat', 'f_3': '_sdlif_z101/tot_f_rate.dat', 'f_out': 'spont_activity_LIF/ll2_spont.csv', 'types': [] }
#sys_dict['ll3_LIF'] = { 'cells_file': '../build/ll3.csv', 'f_1': '/data/mat/ZiqiangW/simulation_ll_syn_data_lif_z102/simulation_ll3/output_ll3_spont_', 'f_2': '_sdlif_z101/spk.dat', 'f_3': '_sdlif_z101/tot_f_rate.dat', 'f_out': 'spont_activity_LIF/ll3_spont.csv', 'types': [] }
result_fname_prefix = 'spont_activity/new_av_spont_rates_by_type'
#result_fname_prefix = 'spont_activity_LIF/new_av_spont_rates_by_type'
result_fig_fname = result_fname_prefix + '.eps'
'''
# Read files containing firing rates for each trial, average over all trials, and save to file.
for i_sys, sys_name in enumerate(sys_dict.keys()):
# Obtain information about cell types.
cells = pd.read_csv(sys_dict[sys_name]['cells_file'], sep=' ')
gids = cells['index'].values
out_df = pd.DataFrame({'gid': gids, 'type': cells['type'].values})
# Process the firing rate files.
rates = np.zeros(gids.size)
    for i_trial in range(0, N_trials):
        f_name = '%s%d%s' % (sys_dict[sys_name]['f_1'], i_trial, sys_dict[sys_name]['f_3'])
        print('Processing file %s.' % (f_name))
tmp_rates = np.genfromtxt(f_name, delimiter=' ')[:, 1] # Assume all files have the same columns of gids; use the 2nd column for rates.
rates += tmp_rates
rates = rates / (1.0 * N_trials)
out_df['%s_frate' % (sys_name)] = rates
out_df.to_csv(sys_dict[sys_name]['f_out'], sep=' ', index=False)
'''
# Read files with firing rate averages over trials for simulations.
rates_df = pd.DataFrame()
for sys_name in sys_dict.keys():
tmp_df = pd.read_csv(sys_dict[sys_name]['f_out'], sep=' ')
# Combine firing rates from all systems into one file.
tmp_df.rename(columns={'%s_frate' % (sys_name): 'frate'}, inplace=True)
rates_df = pd.concat([rates_df, tmp_df], axis=0)
# Read file with firing rate averages over trials for experiments.
exp_df = pd.read_csv('/allen/aibs/mat/antona/experimental_data/2017_07_25_ephys_Sev_processed_by_Yazan/f_avg_per_cell_Spont.csv', sep=' ', header=None)
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.colors import LogNorm
from pylossmap import BLMData
# from tensorflow.keras.utils import Sequence
from tqdm.auto import tqdm
UFO_LABEL = 1
NON_UFO_LABEL = 0
def augment_mirror(data: np.ndarray) -> np.ndarray:
"""Augment the data with the mirrored data.
Args:
data: data to augment
Returns:
the data with the mirrored data appended to the data.
"""
return np.vstack([data, data[:, ::-1]])
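# Shape sketch: mirroring doubles the number of samples, e.g. a hypothetical (100, 33) array of
# BLM signal windows becomes (200, 33), with the second half reversed along the signal axis.
#
#   doubled = augment_mirror(np.random.rand(100, 33))   # doubled.shape == (200, 33)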
def create_labels(
ufo: np.ndarray, non_ufo: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Create the label arrays.
Args:
ufo: ufo data
non_ufo: non ufo data
Returns:
The labels of the ufo and non ufo data.
"""
ufo_labels = np.array([UFO_LABEL] * len(ufo))[:, None]
non_ufo_labels = np.array([NON_UFO_LABEL] * len(non_ufo))[:, None]
return ufo_labels, non_ufo_labels
def truncate_data(data: List[pd.DataFrame], target_length: int) -> np.ndarray:
"""Truncate the rows to a given length, centered.
Args:
data: iterable containing vector data to truncate
        target_length: the desired length of the vector containing the blm signals
Returns:
Array containing the truncated data.
"""
truncated_rows = []
for row in data:
length = row.shape[1]
half_delta = (length - target_length) / 2
start_shift = int(np.floor(half_delta))
end_cutoff = int(np.ceil(half_delta))
        # use an explicit end index so end_cutoff == 0 keeps the full row instead of slicing to empty
        row_chunk = row.iloc[0, start_shift:length - end_cutoff]
truncated_rows.append(row_chunk.to_numpy())
truncated_rows = np.array(truncated_rows)
return truncated_rows
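# Example of the centering logic (hypothetical sizes): a 1-row DataFrame with 40 BLMs truncated
# to target_length=33 drops floor(3.5)=3 columns at the start and ceil(3.5)=4 at the end.
#
#   rows = [pd.DataFrame(np.random.rand(1, 40))]
#   truncate_data(rows, target_length=33).shape   # (1, 33)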
def create_peak_dataset(
ufo_meta: pd.DataFrame,
raw_data_dir: Path,
dcum_around: int = 24000,
target_length: int = 33,
prior_dt: int = 3,
post_dt: int = 3,
non_ufo_threshold: float = 1e-3,
include_meta: bool = True,
) -> Dict[str, np.ndarray]:
"""Create a ufo and non ufo peak dataset.
Args:
ufo_meta: metadata of the ufo events
raw_data_dir: directory containing the raw data
dcum_around: dcum range around the ufo
        target_length: the desired length of the vector containing the blm signals
prior_dt: how many seconds back to get the prior events
post_dt: how many seconds forward to get the post events
non_ufo_threshold: don't include non ufo samples when the max is above threshold
include_meta: include the metadata of the samples in the returned dictionary
Returns:
Dictionary containing the ufo and non ufo data and metadata.
"""
non_ufo_prior = []
non_ufo_prior_meta = []
peaks = []
peaks_meta = []
non_ufo_post = []
non_ufo_post_meta = []
for idx, ufo in tqdm(ufo_meta.iterrows(), total=len(ufo_meta)):
raw_fill_data = BLMData.load(raw_data_dir / f"{ufo.fill}.h5")
raw_fill_data.df = raw_fill_data.df.droplevel("mode")
raw_fill_data.df = raw_fill_data.df.iloc[~raw_fill_data.df.index.duplicated()]
raw_idx = raw_fill_data.df.index.get_loc(ufo.datetime, method="nearest") + 1
around_blms = raw_fill_data.meta[
(raw_fill_data.meta["dcum"] < ufo.dcum + dcum_around)
& (raw_fill_data.meta["dcum"] > ufo.dcum - dcum_around)
]
around_data = raw_fill_data.df[around_blms.index].iloc[raw_idx : raw_idx + 1]
if around_data.shape[1] < target_length:
print("skipping sample, not enough blms.")
continue
peaks.append(around_data)
if include_meta:
peaks_meta.append(ufo)
around_prior_data = raw_fill_data.df[around_blms.index].iloc[
raw_idx - prior_dt : raw_idx + 1 - prior_dt
]
around_post_data = raw_fill_data.df[around_blms.index].iloc[
raw_idx + post_dt : raw_idx + 1 + post_dt
]
print("===============")
print("prior max: ", around_prior_data.max().max())
print("prior min: ", around_prior_data.min().min())
print("prior shape: ", around_prior_data.shape)
if around_prior_data.max().max() > non_ufo_threshold:
print("High value, skipping")
print(idx, ufo)
elif around_prior_data.min().min() == 0:
print("found a zero min value, skipping")
print(idx, ufo)
else:
non_ufo_prior.append(around_prior_data)
if include_meta:
prior_meta = ufo.copy()
prior_meta["datetime"] = prior_meta["datetime"] - pd.Timedelta(
f"{prior_dt}s"
)
non_ufo_prior_meta.append(prior_meta)
print("post max: ", around_post_data.max().max())
print("post min: ", around_post_data.min().min())
print("post shape: ", around_post_data.shape)
if around_post_data.max().max() > non_ufo_threshold:
print("High value, skipping")
print(idx, ufo)
elif around_post_data.min().min() == 0:
print("found a zero min value, skipping")
print(idx, ufo)
else:
non_ufo_post.append(around_post_data)
if include_meta:
post_meta = ufo.copy()
post_meta["datetime"] = post_meta["datetime"] + pd.Timedelta(
f"{post_dt}s"
)
non_ufo_post_meta.append(post_meta)
out = {
"ufo": truncate_data(peaks, target_length=target_length),
"non_ufo_prior": truncate_data(non_ufo_prior, target_length=target_length),
"non_ufo_post": truncate_data(non_ufo_post, target_length=target_length),
}
if include_meta:
out["ufo_meta"] = pd.DataFrame(peaks_meta)
out["non_ufo_prior_meta"] = pd.DataFrame(non_ufo_prior_meta)
out["non_ufo_post_meta"] =
|
pd.DataFrame(non_ufo_post_meta)
|
pandas.DataFrame
|
import os
import sys
import json
import pandas as pd
from bentoml.configuration import get_bentoml_deploy_version
def test_pip_install_saved_bentoservice_bundle(bento_bundle_path, tmpdir):
import subprocess
install_path = str(tmpdir.mkdir("pip_local"))
bentoml_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
stdout = subprocess.check_output(
["pip", "install", "-U", "--target={}".format(install_path), bento_bundle_path]
).decode('utf-8')
assert "Processing {}".format(bento_bundle_path) in stdout
assert "Collecting bentoml=={}".format(get_bentoml_deploy_version()) in stdout
assert "Successfully built TestBentoService" in stdout
# ensure BentoML is installed as dependency
assert os.path.isfile(os.path.join(install_path, "bin/bentoml"))
assert os.path.isdir(os.path.join(install_path, "bentoml"))
sys.path.insert(0, install_path)
TestBentoService = __import__("TestBentoService")
sys.path.remove(install_path)
svc = TestBentoService.load()
    res = svc.predict_dataframe(pd.DataFrame(pd.DataFrame([1], columns=["col1"])))
import pandas as pd
import numpy as np
import os
import math
from gluonts_forecasts.model import Model
from dku_constants import (
METRICS_DATASET,
METRICS_COLUMNS_DESCRIPTIONS,
TIMESERIES_KEYS,
EVALUATION_METRICS_DESCRIPTIONS,
ROW_ORIGIN,
)
from gluonts_forecasts.gluon_dataset import DkuGluonDataset
from gluonts_forecasts.model_config_registry import ModelConfigRegistry
from gluonts_forecasts.utils import add_row_origin
from safe_logger import SafeLogger
logger = SafeLogger("Forecast plugin")
class TrainingSession:
"""
Class to train and evaluate multiple GluonTS estimators on a training dataframe, and to retrieve an aggregated metrics dataframe
Attributes:
target_columns_names (list): List of column names to predict
time_column_name (str)
frequency (str): Pandas timeseries frequency (e.g. '3M')
epoch (int): Number of epochs used by the GluonTS Trainer class
models_parameters (dict): Dictionary of model names (key) and their parameters (value)
prediction_length (int): Number of time steps to predict
training_df (DataFrame): Training dataframe
make_forecasts (bool): True to output the evaluation predictions of the last prediction_length time steps
external_features_columns_names (list, optional): List of columns with dynamic real features over time
timeseries_identifiers_names (list, optional): Columns to identify multiple time series when data is in long format
batch_size (int, optional): Size of batch used by the GluonTS Trainer class
user_num_batches_per_epoch (int, optional): Number of batches per epoch selected by user. -1 means to compute scaled number
num_batches_per_epoch (int, optional): Number of batches per epoch
season_length (int, optional): Length of the seasonality parameter.
mxnet_context (mxnet.context.Context, optional): MXNet context to use for Deep Learning models training.
timeseries_cross_validation (bool, optional): Whether to use timeseries cross-validation.
rolling_windows_number (int, optional): Number of splits used in the training set. Mandatory for cross-validation.
cutoff_period (int, optional): Number of time steps between each split. Mandatory for cross-validation.
"""
def __init__(
self,
target_columns_names,
time_column_name,
frequency,
epoch,
models_parameters,
prediction_length,
training_df,
make_forecasts,
external_features_columns_names=None,
timeseries_identifiers_names=None,
batch_size=None,
user_num_batches_per_epoch=None,
season_length=None,
mxnet_context=None,
timeseries_cross_validation=False,
rolling_windows_number=1,
cutoff_period=-1,
):
self.models_parameters = models_parameters
self.models = []
self.glutonts_dataset = None
self.training_df = training_df
self.prediction_length = prediction_length
self.target_columns_names = target_columns_names
self.time_column_name = time_column_name
self.frequency = frequency
self.epoch = epoch
self.make_forecasts = make_forecasts
self.external_features_columns_names = external_features_columns_names
self.use_external_features = bool(external_features_columns_names)
self.timeseries_identifiers_names = timeseries_identifiers_names
self.session_name = None
self.session_path = None
if self.make_forecasts:
self.evaluation_forecasts_df = None
self.metrics_df = None
self.batch_size = batch_size
self.user_num_batches_per_epoch = user_num_batches_per_epoch
self.num_batches_per_epoch = None
self.season_length = season_length
self.mxnet_context = mxnet_context
self.timeseries_cross_validation = timeseries_cross_validation
self.rolling_windows_number = rolling_windows_number if timeseries_cross_validation else 1
self.cutoff_period = cutoff_period
self.gluon_list_datasets_by_cut_length = None
self.rolling_windows_cut_lengths_train_test_pairs = None
def init(self, session_name, partition_root=None):
"""Create the session_path.
Args:
session_name (Timestamp)
partition_root (str, optional): Partition root path, concatenated to session_name to create the session_path. Defaults to None.
"""
self.session_name = session_name
if partition_root is None:
self.session_path = session_name
else:
self.session_path = os.path.join(partition_root, session_name)
def create_gluon_list_datasets(self):
"""Create train and test gluon list datasets.
The last prediction_length time steps are removed from each timeseries of the train dataset.
Compute optimal num_batches_per_epoch value based on the train dataset size.
"""
gluon_dataset = DkuGluonDataset(
time_column_name=self.time_column_name,
frequency=self.frequency,
target_columns_names=self.target_columns_names,
timeseries_identifiers_names=self.timeseries_identifiers_names,
external_features_columns_names=self.external_features_columns_names,
min_length=2 * self.prediction_length, # Assuming that context_length = prediction_length
)
self.rolling_windows_cut_lengths_train_test_pairs = self._compute_rolling_windows_cut_lengths_train_test_pairs()
rolling_windows_unique_cut_lengths = self._compute_rolling_windows_unique_cut_lengths(
self.rolling_windows_cut_lengths_train_test_pairs
)
self.gluon_list_datasets_by_cut_length = gluon_dataset.create_list_datasets(
self.training_df, cut_lengths=rolling_windows_unique_cut_lengths
)
if self.user_num_batches_per_epoch == -1:
self.num_batches_per_epoch = self._compute_optimal_num_batches_per_epoch()
else:
self.num_batches_per_epoch = self.user_num_batches_per_epoch
def instantiate_models(self):
"""Instantiate all the selected models."""
for model_name, model_parameters in self.models_parameters.items():
self.models.append(
Model(
model_name,
model_parameters=model_parameters,
frequency=self.frequency,
prediction_length=self.prediction_length,
epoch=self.epoch,
use_external_features=self.use_external_features,
batch_size=self.batch_size,
num_batches_per_epoch=self.num_batches_per_epoch,
season_length=self.season_length,
mxnet_context=self.mxnet_context,
)
)
def train_evaluate_models(self, retrain=False):
"""
Evaluate all the selected models (then retrain on complete data if specified),
get the metrics dataframe and create the forecasts dataframe if make_forecasts=True.
"""
metrics_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
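The rolling-window bookkeeping described in the docstring (rolling_windows_number splits separated by cutoff_period time steps) is easier to see numerically. The snippet below is only a sketch of one plausible way to derive cut lengths; it is not the plugin's actual _compute_rolling_windows_cut_lengths_train_test_pairs helper.

# Hypothetical sketch: each extra window hides cutoff_period more steps from
# the end of the series; the train cut additionally hides the forecast horizon.
prediction_length = 6
rolling_windows_number = 3
cutoff_period = 2

cut_length_pairs = [
    (i * cutoff_period + prediction_length,  # train cut length
     i * cutoff_period)                      # test cut length
    for i in range(rolling_windows_number)
]
print(cut_length_pairs)  # [(6, 0), (8, 2), (10, 4)]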
import pandas as pd
import requests
import io
import os
from sqlalchemy import create_engine
def init_sql_conn():
user = "root"
host = os.getenv('MYSQL_HOST')
db = "covid"
engine = create_engine(f'mysql://{user}@{host}/{db}')
conn = engine.connect()
return conn
if __name__ == "__main__":
start_date = "3/1/20"
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series"
datasets = {
"deaths": "time_series_covid19_deaths_global.csv",
"cases": "time_series_covid19_confirmed_global.csv",
}
columns = ["day", "region"]
df_all =
|
pd.DataFrame(columns=columns)
|
pandas.DataFrame
|
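The JHU time-series CSVs referenced above are wide (one column per date), while df_all is organised around long (day, region) rows, so a reshape is implied somewhere downstream. A self-contained sketch of that reshape, with made-up values:

import pandas as pd

# Toy frame mimicking the wide JHU layout; the numbers are illustrative only.
wide = pd.DataFrame({
    "Country/Region": ["Italy", "Spain"],
    "3/1/20": [34, 2],
    "3/2/20": [52, 10],
})
long = wide.melt(id_vars=["Country/Region"], var_name="day", value_name="deaths")
long = long.rename(columns={"Country/Region": "region"})
print(long.sort_values(["day", "region"]).to_string(index=False))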
#!/usr/bin/env python
# coding: utf-8
"""
Created on Sun Jun 20 16:38:50 2021
@author: Team_16
"""
#%%
# =============================================================================
# Preliminary setup
# =============================================================================
'''
Import the required data-processing and plotting packages
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display  # assuming the notebook-style display helper was intended
'''
Load the data
'''
df_train = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
df_data = df_train.append(df_test)
# Reset the index
df_data = df_data.reset_index()
#%%
# =============================================================================
# Missing-value handling
# =============================================================================
'''
Count of missing values per column
'''
# Pclass (133), Age (189) and Cabin (690) have relatively many missing values
for col in df_data.columns.tolist():
print('{} column missing values: {}'.format(col, df_data[col].isnull().sum()))
'''
Impute Fare
'''
# Inspect the passenger(s) with a missing Fare
df_data[df_data['Fare'].isnull()]
# The passenger with the missing Fare is in 3rd class, so fill with the median fare of Pclass=3, SibSp=0, Parch=0
med_fare = df_data.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0]
df_data['Fare'] = df_data['Fare'].fillna(med_fare)
'''
Impute Pclass
'''
# Imputation step 1: fill by ticket, since passengers sharing a ticket share the same class:
deplicate_ticket = []
for ticket in df_data.Ticket.unique():
tem = df_data.loc[df_data.Ticket == ticket, 'Fare']
if tem.count() > 1:
deplicate_ticket.append(df_data.loc[df_data.Ticket == ticket,['Name','Ticket','Fare','Pclass','Survived']])
deplicate_ticket = pd.concat(deplicate_ticket)
# one ticket corresponds to one Pclass
deplicate_ticket_dropna = deplicate_ticket.dropna() # first drop passengers whose Pclass is NA
match_tp = deplicate_ticket_dropna.drop_duplicates(subset=['Ticket'], keep='first', inplace=False)
match_tp = pd.concat((match_tp['Ticket'],match_tp['Pclass']),axis = 1)
match_tp_dict = match_tp.set_index('Ticket')['Pclass'].to_dict()
# Fill Pclass using the class associated with the ticket
df_data.Pclass = df_data.Pclass.fillna(df_data.Ticket.map(match_tp_dict))
# Imputation step 2: fill by fare:
# Look at how Fare is distributed within each Pclass
f, ax = plt.subplots(figsize=(8,3))
ax.set_title('Pclass Fare dist', size=20)
sns.distplot(df_data[df_data.Pclass == 1].dropna().Fare, hist=False, color='black', label='P-1')
sns.distplot(df_data[df_data.Pclass == 2].dropna().Fare, hist=False, color='green', label='P-2')
sns.distplot(df_data[df_data.Pclass == 3].dropna().Fare, hist=False, color='blue', label='P-3')
sns.distplot(df_data[df_data.Pclass.isnull() == True].Fare, hist=False, color='red', label='P-NA')
ax.legend(fontsize=15)
# Look at the Fare quartiles for each class
print(df_data[df_data.Pclass==2].Fare.describe())
print(df_data[df_data.Pclass==3].Fare.describe())
print(df_data[df_data.Pclass==1].Fare.describe())
# Observation: 75% of 3rd-class fares are below 15.5; 75% of 2nd-class fares are below 26
# Imputation rule: Fare <= 15.5 -> 3rd class, 15.5 < Fare <= 26 -> 2nd class, otherwise 1st class
# DataFrame of rows where Pclass is not NA
no_na = df_data.loc[df_data['Pclass'].isnull() == False]
# DataFrame of rows where Pclass is NA
yes_na = df_data.loc[df_data['Pclass'].isnull() == True]
fill_p1 = yes_na.loc[df_data['Fare'] > 26]
fill_p2 = yes_na.loc[(df_data['Fare'] >15.5) & (df_data['Fare'] <= 26)]
fill_p3 = yes_na.loc[df_data['Fare'] <= 15.5]
p1 = fill_p1.fillna({'Pclass':1})
p2 = fill_p2.fillna({'Pclass':2})
p3 = fill_p3.fillna({'Pclass':3})
df_data =
|
pd.concat([no_na, p1, p2, p3])
|
pandas.concat
|
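The chained med_fare lookup above (a grouped median followed by [3][0][0]) is worth a small standalone example with toy fares:

import pandas as pd

toy = pd.DataFrame({
    "Pclass": [3, 3, 3, 1],
    "Parch":  [0, 0, 1, 0],
    "SibSp":  [0, 0, 0, 0],
    "Fare":   [7.75, 8.25, 15.50, 71.28],
})
med = toy.groupby(["Pclass", "Parch", "SibSp"]).Fare.median()
# med is a Series with a (Pclass, Parch, SibSp) MultiIndex; each chained []
# drills down one index level, so [3][0][0] selects Pclass=3, Parch=0, SibSp=0.
print(med[3][0][0])  # 8.0 (median of 7.75 and 8.25)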
from lib.utils import ErrorMetricCalculator
import argparse
import os
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.dummy import DummyClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from skfeature.function.statistical_based import CFS
from collections import OrderedDict
from multiprocessing import Pool
import numpy as np
from collections import Counter
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
random_state=42
def preprocess(data,igCol):
data_preproc = data.copy(deep=True)
data_preproc['len'] = data_preproc['len'].astype(float)
data_preproc['max_fft_imag'] = data_preproc['max_fft_imag'].astype(float)
# data_preproc.dropna(subset=['Winner','Community_0','Community_1','Community_2'],inplace=True)
data_preproc.dropna(subset=['Winner'],inplace=True)
if igCol is not None and len(igCol)>0:
data_preproc.drop(columns=igCol,inplace=True)
le = LabelEncoder()
data_preproc['Winner'] = le.fit_transform(data_preproc['Winner'])
cat_columns = ['sector'] if igCol is None or 'sector' not in igCol else []
for i in range(7): ## Quick hack (gambiarra); make this more organised later
col_name='Community_{}'.format(i)
if col_name in data_preproc.columns:
print("================COMMUNITY COLUMN DETECTED=================")
data_preproc[col_name] = data_preproc[col_name].astype(str)
data_preproc.fillna(value={col_name: 'NA'},inplace=True)
cat_columns.append(col_name)
data_preproc =
|
pd.get_dummies(data_preproc, prefix_sep="__", columns=cat_columns,dtype=float)
|
pandas.get_dummies
|
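Using prefix_sep="__" keeps the original column name recoverable from each dummy column, which helps when inspecting selected features later. A minimal sketch:

import pandas as pd

toy = pd.DataFrame({"sector": ["tech", "energy"], "Community_0": ["1", "NA"]})
dummies = pd.get_dummies(toy, prefix_sep="__", columns=["sector", "Community_0"], dtype=float)
print(dummies.columns.tolist())
# ['sector__energy', 'sector__tech', 'Community_0__1', 'Community_0__NA']
# dtype=float makes every dummy column float64 rather than uint8/bool.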
import email
import glob
import json
from argparse import ArgumentParser, Namespace
from email.header import decode_header, make_header
from email.message import Message
from urllib.parse import urlparse
import numpy as np
from bs4 import BeautifulSoup
from pandas import DataFrame
def find_spy_pixels():
args = parse_arguments()
df =
|
DataFrame([], columns=["domain", "src", "sender", "filename", "attributes"])
|
pandas.DataFrame
|
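A hedged sketch (not the script's actual logic) of how a detected tracking pixel could be appended as one row to a frame with this schema:

import pandas as pd

columns = ["domain", "src", "sender", "filename", "attributes"]
df = pd.DataFrame([], columns=columns)
row = {
    "domain": "tracker.example.com",
    "src": "https://tracker.example.com/pixel.gif",
    "sender": "newsletter@example.com",
    "filename": "mail_001.eml",
    "attributes": {"width": "1", "height": "1"},
}
df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
print(len(df))  # 1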
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from builtins import map, range, object, zip, sorted
from past.builtins import basestring
from numbers import Real
from .base import BaseClass
from .utils import Utils, Tuple
from .iterators import RowIterator, ColIterator
from . import amplpython
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
class Row(BaseClass):
"""
Represents a row in a :class:`~amplpy.DataFrame`.
"""
def __init__(self, _impl):
self._impl = _impl
def __iter__(self):
return RowIterator(self._impl)
def __getitem__(self, key):
return Utils.castVariantRef(self._impl.getIndex(key))
def toString(self):
return str(list(self))
class Column(BaseClass):
"""
Represents a column in a :class:`~amplpy.DataFrame`.
"""
def __init__(self, _impl):
self._impl = _impl
def __iter__(self):
return ColIterator(self._impl)
def toString(self):
return str(list(self))
class DataFrame(BaseClass):
"""
A DataFrame object, used to communicate data to and from the AMPL entities.
An object of this class can be used to do the following tasks:
- Assign values to AMPL entities (once the DataFrame is populated, use
:func:`~amplpy.AMPL.setData` to assign its values to the modelling entities
in its columns)
- Get values from AMPL, decoupling the values from the AMPL entities they
originate from, via :func:`~amplpy.Entity.getValues`.
A DataFrame object can be created in various ways.
- Create a skeleton by manually specifying the indexing columns and the
column headers.
- Get values from AMPL, decoupling the values from the AMPL entities they
originate from (via :func:`~amplpy.Entity.getValues`).
Populating a DataFrame object can be done adding row by row to a
pre-existing skeleton via :func:`~amplpy.DataFrame.addRow`, setting whole
columns of a pre-existing skeleton via :func:`~amplpy.DataFrame.setColumn`
or adding columns (including indexing columns) via
:func:`~amplpy.DataFrame.addColumn`.
Modifying a DataFrame object can be done via
:func:`~amplpy.DataFrame.setColumn` or, item by item, via
:func:`~amplpy.DataFrame.setValue`.
Accessing data in a DataFrame can be done row by row using
:func:`~amplpy.DataFrame.getRow` or by column via
:func:`~amplpy.DataFrame.getColumn`.
"""
def __init__(self, index, columns=tuple(), **kwargs):
"""
Create a new DataFrame with specified index and column headers.
Args:
index: Index column;
columns: Column headers.
"""
if index is not None:
if isinstance(index, basestring):
index = (index,)
if isinstance(columns, basestring):
columns = (columns,)
index_names = [
col[0] if isinstance(col, tuple) else col
for col in index
]
column_names = [
col[0] if isinstance(col, tuple) else col
for col in columns
]
self._impl = amplpython.DataFrame.factory(
len(index_names),
list(index_names) + list(column_names),
len(index_names) + len(column_names)
)
for col in index:
if isinstance(col, tuple):
self.setColumn(col[0], col[1])
for col in columns:
if isinstance(col, tuple):
self.setColumn(col[0], col[1])
else:
self._impl = kwargs.get('_impl', None)
def __iter__(self):
# FIXME: C++ iterators for dataframes not working with SWIG.
return (self.getRowByIndex(i) for i in range(self.getNumRows()))
def getNumCols(self):
"""
Get the total number of columns in this dataframe (indexarity + number
of values).
Returns:
The number of columns.
"""
return self._impl.getNumCols()
def getNumRows(self):
"""
Get the number of data rows in this dataframe.
Returns:
The number of rows.
"""
return self._impl.getNumRows()
def getNumIndices(self):
"""
Get the number of indices (the indexarity) of this dataframe.
Returns:
The number of indices needed to access one row of this dataframe.
"""
return self._impl.getNumIndices()
def addRow(self, *value):
"""
Add a row to the DataFrame. The size of the tuple must be equal to the
total number of columns in the dataframe.
Args:
value: A single argument with a tuple containing all the values
for the row to be added, or multiple arguments with the values for
each column.
"""
if len(value) == 1 and isinstance(value[0], (tuple, list)):
value = value[0]
assert len(value) == self.getNumCols()
self._impl.addRow(Tuple(value)._impl)
def addColumn(self, header, values=[]):
"""
Add a new column with the corresponding header and values to the
dataframe.
Args:
header: The name of the new column.
values: A list of size :func:`~amplpy.DataFrame.getNumRows` with
all the values of the new column.
"""
if len(values) == 0:
self._impl.addColumn(header)
else:
assert len(values) == self.getNumRows()
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.addColumnStr(header, values)
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.addColumnDbl(header, values)
else:
raise NotImplementedError
def getColumn(self, header):
"""
Get the specified column as a view object.
Args:
header: The header of the column.
"""
return Column(self._impl.getColumn(header))
def setColumn(self, header, values):
"""
Set the values of a column.
Args:
header: The header of the column to be set.
values: The values to set.
"""
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setColumnStr(header, values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setColumnDbl(header, values, len(values))
else:
print(values)
raise NotImplementedError
def getRow(self, key):
"""
Get a row by value of the indexing columns. If the index is not
specified, gets the only row of a dataframe with no indexing columns.
Args:
key: Tuple representing the index of the desired row.
Returns:
The row.
"""
return Row(self._impl.getRow(Tuple(key)._impl))
def getRowByIndex(self, index):
"""
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
"""
assert isinstance(index, int)
return Row(self._impl.getRowByIndex(index))
def getHeaders(self):
"""
Get the headers of this DataFrame.
Returns:
The headers of this DataFrame.
"""
headers = self._impl.getHeaders()
return tuple(
headers.getIndex(i) for i in range(self._impl.getNumCols())
)
def setValues(self, values):
"""
Set the values of a DataFrame from a dictionary.
Args:
values: Dictionary with the values to set.
"""
ncols = self.getNumCols()
nindices = self.getNumIndices()
for key, value in values.items():
key = Utils.convToList(key)
assert len(key) == nindices
value = Utils.convToList(value)
assert len(value) == ncols-nindices
self.addRow(key + value)
def toDict(self):
"""
Return a dictionary with the DataFrame data.
"""
d = {}
nindices = self.getNumIndices()
for i in range(self.getNumRows()):
row = list(self.getRowByIndex(i))
if nindices > 1:
key = tuple(row[:nindices])
elif nindices == 1:
key = row[0]
else:
key = None
if len(row) - nindices == 0:
d[key] = None
elif len(row) - nindices == 1:
d[key] = row[nindices]
else:
d[key] = tuple(row[nindices:])
return d
def toList(self):
"""
Return a list with the DataFrame data.
"""
if self.getNumCols() > 1:
return [
tuple(self.getRowByIndex(i))
for i in range(self.getNumRows())
]
else:
return [
self.getRowByIndex(i)[0]
for i in range(self.getNumRows())
]
def toPandas(self):
"""
Return a pandas DataFrame with the DataFrame data.
"""
assert pd is not None
nindices = self.getNumIndices()
headers = self.getHeaders()
columns = {
header: list(self.getColumn(header))
for header in headers[nindices:]
}
index = zip(*[
list(self.getColumn(header))
for header in headers[:nindices]
])
index = [key if len(key) > 1 else key[0] for key in index]
if index == []:
return pd.DataFrame(columns, index=None)
else:
return pd.DataFrame(columns, index=index)
@classmethod
def fromDict(cls, dic, index_names=None, column_names=None):
"""
Create a :class:`~amplpy.DataFrame` from a dictionary.
Args:
dic: dictionary to load.
index_names: index names to use.
column_names: column names to use.
"""
assert isinstance(dic, dict)
assert len(dic) != 0
def to_tuple(e):
if isinstance(e, (tuple, list)):
return tuple(e)
else:
return (e,)
lst_index = list(map(to_tuple, dic.keys()))
lst_columns = list(map(to_tuple, dic.values()))
nindices, ncolumns = len(lst_index[0]), len(lst_columns[0])
assert index_names is None or nindices == len(index_names)
assert column_names is None or ncolumns == len(column_names)
assert all(len(k) == nindices for k in lst_index)
assert all(len(v) == ncolumns for v in lst_columns)
index = zip(*lst_index)
columns = zip(*lst_columns)
if index_names is None:
index_names = ['index{}'.format(i) for i in range(nindices)]
if column_names is None:
column_names = ['value{}'.format(i) for i in range(ncolumns)]
index = [
(index_names[i], cindex)
for i, cindex in enumerate(zip(*lst_index))
]
columns = [
(column_names[i], column)
for i, column in enumerate(zip(*lst_columns))
]
return cls(index=index, columns=columns)
@classmethod
def fromPandas(cls, df, index_names=None):
"""
Create a :class:`~amplpy.DataFrame` from a pandas DataFrame.
Args:
df: Pandas DataFrame to load.
index_names: index names to use.
"""
assert pd is not None
if isinstance(df, pd.Series):
df =
|
pd.DataFrame(df)
|
pandas.DataFrame
|
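On the pandas side, the fromPandas() guard shown in the completion simply promotes a Series to a one-column DataFrame, keeping its index and using its name as the column:

import pandas as pd

s = pd.Series([0.1, 0.2], index=["a", "b"], name="value")
df = pd.DataFrame(s)  # same promotion fromPandas() applies to Series input
print(df.columns.tolist())  # ['value']
print(df.index.tolist())    # ['a', 'b']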
'''
Group enabled ANPNetwork class and supporting classes.
'''
from pyanp.pairwise import Pairwise
from pyanp.prioritizer import Prioritizer, PriorityType
from pyanp.general import islist, unwrap_list, get_matrix, matrix_as_df
from typing import Union
import pandas as pd
from copy import deepcopy
from pyanp.limitmatrix import normalize, calculus, priority_from_limit
import numpy as np
import re
from pyanp.rating import Rating
class ANPNode:
'''
A node inside a cluster, inside a network. The basic building block of
an ANP network.
:param network: An ANPNetwork object that this node lives inside.
:param cluster: An ANPCluster object that this node lives inside.
:param name: The name of this node.
'''
def __init__(self, network, cluster, name:str):
self.name = name
self.cluster = cluster
self.network = network
self.node_prioritizers = {}
self.subnetwork = None
self.invert = False
def is_node_cluster_connection(self, dest_cluster:str)->bool:
'''
Is this node connected to a cluster.
:param dest_cluster: The name of the cluster
:return: True/False
'''
if dest_cluster in self.node_prioritizers:
return True
else:
return False
def node_connect(self, dest_node)->None:
'''
Make a node connection from this node to dest_node
:param dest_node: The destination node as a str, int, or ANPNode. It
can be a list of nodes, and then we will connect each node from
this node. The dest_node should be in any format accepted by
ANPNetwork._get_node()
'''
if islist(dest_node):
for dn in dest_node:
self.node_connect(dn)
else:
prioritizer = self.get_node_prioritizer(dest_node, create=True)
prioritizer.add_alt(dest_node, ignore_existing=True)
#Make sure parent clusters are connected
src_cluster = self.cluster
dest_cluster = self.network._get_node_cluster(dest_node)
src_cluster.cluster_connect(dest_cluster)
def get_node_prioritizer(self, dest_node, create=False,
create_class=Pairwise, dest_is_cluster=False)->Prioritizer:
'''
Gets the node prioritizer for the other_node
:param dest_node: The node as a int, str, or ANPNode object.
:return: The prioritizer if it exists, or None
'''
if dest_is_cluster:
dest_cluster = self.network.cluster_obj(dest_node)
dest_name = dest_cluster.name
else:
dest_cluster = self.network._get_node_cluster(dest_node)
dest_name = dest_cluster.name
if dest_name not in self.node_prioritizers:
if create:
prioritizer = create_class()
self.node_prioritizers[dest_name] = prioritizer
return prioritizer
else:
return None
else:
return self.node_prioritizers[dest_name]
def is_node_node_connection(self, dest_node)->bool:
'''
Checks if there is a node connection from this node to dest_node
:param dest_node: The node as a int, str, or ANPNode object.
:return:
'''
pri = self.get_node_prioritizer(dest_node)
if pri is None:
return False
elif not pri.is_alt(dest_node):
return False
else:
return True
def get_unscaled_column(self, username=None)->pd.Series:
'''
Returns the column in the unscaled supermatrix for this node.
:param username: The user/users to do this for. Typical Prioritizer
calculation usage, i.e. None means the group average across all users.
:return: A pandas series indexed by the node names.
'''
nnodes = self.network.nnodes()
rval = pd.Series(data=[0.0]*nnodes, index=self.network.node_names())
prioritizer:Prioritizer
for prioritizer in self.node_prioritizers.values():
vals = prioritizer.priority(username, PriorityType.NORMALIZE)
for alt, val in vals.iteritems():
rval[alt] = val
return rval
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
pri:Prioritizer
for pri in self.node_prioritizers.values():
pri.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def set_node_prioritizer_type(self, destNode, prioritizer_class):
'''
Sets the node prioritizer type
:param destNode: An ANPNode object, string, or integer location
:param prioritizer_class: The new type
:return: None
'''
pri = self.get_node_prioritizer(destNode, create_class=prioritizer_class)
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
dest_cluster = self.network._get_node_cluster(destNode)
dest_name = dest_cluster.name
self.node_prioritizers[dest_name] = rval
else:
pass
class ANPCluster:
'''
A cluster in an ANP object
:param network: The ANPNetwork object this cluster is in.
:param name: The name of the cluster to create.
'''
def __init__(self, network, name:str):
self.prioritizer = Pairwise()
self.name = name
self.network = network
# The dictionary of ANP nodes in this cluster, keyed by node name
self.nodes = {}
def add_node(self, *nodes)->None:
"""
Adds one or more nodes
:param nodes: A vararg list of node names to add to this cluster.
The names should all be strings.
:return: Nothing
"""
nodes = unwrap_list(nodes)
if islist(nodes):
for node in nodes:
if isinstance(node, str):
self.add_node(node)
else:
self.nodes[nodes] = ANPNode(self.network, self, nodes)
def nnodes(self)->int:
"""
:return: The number of nodes in this cluster.
"""
return len(self.nodes)
def is_node(self, node_name:str)->bool:
'''
Does a node by that name exist in this cluster
:param node_name: The name of the node to look for
:return: True/False
'''
return node_name in self.nodes
def node_obj(self, node_name):
"""
Get a node in this cluster.
:param node_name: The node as either a string name, integer position, or
simply the ANPNode object, in which case there is nothing to do except
return it.
:return: ANPNode object. If it wasn't found, None is returned.
"""
if isinstance(node_name, ANPNode):
return node_name
else:
return get_item(self.nodes, node_name)
def node_names(self)->list:
'''
:return: List of the string names of the nodes in this cluster
'''
return list(self.nodes.keys())
def node_objs(self)->list:
'''
:return: List of the ANPNode objects in this cluster.
'''
return self.nodes.values()
def cluster_connect(self, dest_cluster)->None:
"""
Make a cluster->cluster connection from this cluster to the destination.
:param dest_cluster: Either the ANPCluster object to connect to, or
the name of the destination cluster.
:return:
"""
if isinstance(dest_cluster, ANPCluster):
dest_cluster_name = dest_cluster.name
else:
dest_cluster_name = dest_cluster
self.prioritizer.add_alt(dest_cluster_name, ignore_existing=True)
def set_prioritizer_type(self, prioritizer_class)->None:
'''
Sets the cluster prioritizer type
:param prioritizer_class: The new type
:return: None
'''
pri = self.prioritizer
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
self.prioritizer = rval
else:
pass
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
if self.prioritizer is not None:
self.prioritizer.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def get_item(tbl:dict, key):
"""
Looks up an item in a dictionary by key first, assuming the key is in the
dictionary. Otherwise, it checks if the key is an integer, and returns
the item in that position.
:param tbl: The dictionary to look in
:param key: The key, or integer position to get the item of
:return: The item, or None if not found
"""
if key in tbl:
return tbl[key]
elif not isinstance(key, int):
return None
# We have an integer key by this point
if key < 0:
return None
elif key >= len(tbl):
return None
else:
count = 0
for rval in tbl.values():
if count == key:
return rval
count+=1
#Should never make it here
raise ValueError("Shouldn't happen in anp.get_item")
__CLEAN_SPACES_RE = re.compile('\\s+')
def clean_name(name:str)->str:
"""
Cleans up a string for usage by:
1. stripping off leading and trailing spaces
2. collapsing runs of whitespace into a single space
3. tabs and newlines are treated like a space
:param name: The string name to be cleaned
:return: The cleaned name.
"""
rval = name.strip()
return __CLEAN_SPACES_RE.sub(string=rval, repl=' ')
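# Illustrative example of the normalisation clean_name performs:
assert clean_name("  a \t b \n c  ") == "a b c"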
def sum_subnetwork_formula(priorities:pd.Series, dict_of_series:dict):
"""
A function that takes the weighted sum of values. Used for synthesis.
:param priorities: Series whose index are the nodes with subnetworks and
values are their weights.
:param dict_of_series: A dictionary whose keys are the same as the keys of
priorities, i.e. the nodes with subnetworks. The values are Series
whose keys are alternative names and values are the synthesized
alternative scores under that subnetwork.
:return:
"""
subpriorities = priorities[dict_of_series.keys()]
if sum(subpriorities) != 0:
subpriorities /= sum(subpriorities)
rval = pd.Series()
counts = pd.Series(dtype=int)
for subnet_name, vals in dict_of_series.items():
priority = subpriorities[subnet_name]
for alt_name, val in vals.iteritems():
if alt_name in rval:
rval[alt_name] += val * priority
counts[alt_name] += priority
else:
rval[alt_name] = val
counts[alt_name] = priority
# Now let's calculate the averages
for alt_name, val in rval.iteritems():
if counts[alt_name] > 0:
rval[alt_name] /= counts[alt_name]
return rval
class ANPNetwork(Prioritizer):
'''
Represents an ANP prioritizer. Has clusters/nodes, comparisons, etc.
:param create_alts_cluster: If True (which is the default) we start with a
cluster that is the alternatives cluster. Otherwise the model starts
empty.
'''
def __init__(self, create_alts_cluster=True):
self.clusters = {}
if create_alts_cluster:
cl = self.add_cluster("Alternatives")
self.alts_cluster = cl
self.users=[]
self.limitcalc = calculus
self.subnet_formula = sum_subnetwork_formula
self.default_priority_type = None
def add_cluster(self, *args)->ANPCluster:
'''
Adds one or more clusters to a network
:param args: Can be either a single string, or a list of strings
:return: ANPCluster object or list of ANPCluster objects
'''
clusters = unwrap_list(args)
if islist(clusters):
rval = []
for cl in clusters:
rval.append(self.add_cluster(cl))
return rval
else:
#Adding a single cluster
cl = ANPCluster(self, clusters)
self.clusters[clusters] = cl
return cl
def cluster_names(self)->list:
'''
:return: List of string names of the clusters
'''
return list(self.clusters.keys())
def nclusters(self)->int:
'''
:return: The number of clusters in the network.
'''
return len(self.clusters)
def cluster_obj(self, cluster_info:Union[ANPCluster, str])->ANPCluster:
'''
Returns the cluster with given information
:param cluster_info: Either the name of the cluster object to get
or the cluster object, or its int position
:return: The ANPCluster object
'''
if isinstance(cluster_info, ANPCluster):
return cluster_info
else:
return get_item(self.clusters, cluster_info)
def add_node(self, cl, *nodes):
'''
Adds nodes to a cluster
:param cl: The cluster name or object
:param nodes: The name or names of the nodes
:return: Nothing
'''
cluster = self.cluster_obj(cl)
cluster.add_node(nodes)
def nnodes(self, cluster=None)->int:
"""
Returns the number of nodes in the network, or a cluster.
:param cluster: If None, we return the number of nodes in the network.
Otherwise this is the integer position, string name, or ANPCluster
object of the cluster to get the node count within.
:return: The count.
"""
if cluster is None:
rval = pd.Series()
for cname, cluster in self.clusters.items():
rval[cname] = cluster.nnodes()
return sum(rval)
else:
clobj = self.cluster_obj(cluster)
return clobj.nnodes()
def add_alt(self, alt_name:str):
"""
Adds an alternative to the model:
1. Adds the alternative to alts_cluster if not None
2. For each node with a subnetwork, we add the alternative to that subnetwork.
:param alt_name: The name of the alternative to add
:return: Nothing
"""
if self.alts_cluster is not None:
self.add_node(self.alts_cluster, alt_name)
# We should add this alternative to each subnetwork
for node in self.node_objs_with_subnet():
node.subnetwork.add_alt(alt_name)
def is_user(self, uname)->bool:
'''
Checks if a user exists
:param uname: The name of the user to check for
:return: bool
'''
return uname in self.users
def is_alt(self, altname)->bool:
'''
Checks if an alternative exists
:param altname: The alternative name to look for
:return: bool
'''
return self.alts_cluster.is_node(altname)
def add_user(self, uname, ignore_dupe=False):
'''
Adds a user to the system
:param uname: The name of the new user
:return: Nothing
:raises ValueError: If the user already exists
'''
if islist(uname):
for un in uname:
self.add_user(un, ignore_dupe=ignore_dupe)
return
if self.is_user(uname):
if not ignore_dupe:
raise ValueError("User by the name "+uname+" already existed")
else:
return
self.users.append(uname)
def nusers(self)->int:
'''
:return: The number of users
'''
return len(self.users)
def user_names(self)->list:
'''
:return: List of names of the users
'''
return deepcopy(self.users)
def node_obj(self, node_name)->ANPNode:
'''
Gets the ANPNode object of the node with the given name
:param node_name: The name of the node to get, or its overall integer
position, or the ANPNode object itself
:return: The ANPNode if it exists, or None
'''
if isinstance(node_name, ANPNode):
return node_name
elif isinstance(node_name, int):
#Reference by integer
node_pos = node_name
node_count = 0
for cluster in self.clusters.values():
rel_pos = node_pos - node_count
if rel_pos < cluster.nnodes():
return cluster.node_obj(rel_pos)
#If we make it here, we were out of bounds
return None
#Okay handle string node name
cluster: ANPCluster
for cname, cluster in self.clusters.items():
rval = cluster.node_obj(node_name)
if rval is not None:
return rval
#Made it here, the node didn't exist
return None
def _get_node_cluster(self, node)->ANPCluster:
'''
Gets the ANPCluster object a node lives in
:param node: The name/integer positions, or ANPNode object itself. See
node_obj() method for more details.
:return: The ANPCluster object this node lives in, or None if it doesn't
exist.
'''
n = self.node_obj(node)
if n is None:
# Could not find the node
return None
return n.cluster
def node_connect(self, src_node, dest_node):
'''
Connects two nodes.
:param src_node: Source node, in any format accepted by the node_obj() function
:param dest_node: Destination node, in any format accepted by the node_obj() function
:return: Nothing
'''
src = self.node_obj(src_node)
src.node_connect(dest_node)
def node_names(self, cluster=None)->list:
'''
Returns a list of nodes in this network, organized by cluster
:param cluster: If None, we get all nodes in the network; otherwise we get
the nodes in that cluster (specified in any format accepted by the cluster_obj() function).
:return: List of strs of node names
'''
if cluster is not None:
cl = self.cluster_obj(cluster)
return cl.node_names()
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_names()
for name in cnodes:
rval.append(name)
return rval
def node_objs(self)->list:
'''
Returns a list of ANPNodes in this network, organized by cluster
:return: List of ANPNode objects
'''
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_objs()
for name in cnodes:
rval.append(name)
return rval
def cluster_objs(self)->list:
"""
:return: List of ANPCluster objects in the network
"""
return list(self.clusters.values())
def node_connections(self)->np.ndarray:
"""
Returns the node connection matrix for this network.
:return: A numpy array of shape [nnode, nnodes] where item [row, col]
1 means there is a node connection from col -> row, and 0 means
no connection.
"""
nnodes = self.nnodes()
nnames = self.node_names()
rval = np.zeros([nnodes, nnodes])
src_node:ANPNode
for src in range(nnodes):
srcname = nnames[src]
src_node = self.node_obj(srcname)
for dest in range(nnodes):
dest_name = nnames[dest]
if src_node.is_node_node_connection(dest_name):
rval[dest,src]=1
return rval
def unscaled_supermatrix(self, username=None, as_df=False)->np.array:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The unscaled supermatrix as a numpy.array of shape [nnode, nnodes]
'''
nnodes = self.nnodes()
rval = np.zeros([nnodes, nnodes])
nodes = self.node_objs()
col = 0
node:ANPNode
for node in nodes:
rval[:,col] = node.get_unscaled_column(username)
col += 1
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def scaled_supermatrix(self, username=None, as_df=False)->np.ndarray:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The scaled supermatrix
'''
rval = self.unscaled_supermatrix(username)
# Now I need to normalized by cluster weights
clusters = self.cluster_objs()
nclusters = len(clusters)
col = 0
for col_cp in range(nclusters):
col_cluster:ANPCluster = clusters[col_cp]
row_nnodes = col_cluster.nnodes()
cluster_pris = col_cluster.prioritizer.priority(username, PriorityType.NORMALIZE)
row_offset = 0
for col_node in col_cluster.node_objs():
row=0
for row_cp in range(nclusters):
row_cluster:ANPCluster = clusters[row_cp]
row_cluster_name = row_cluster.name
if row_cluster_name in cluster_pris:
priority = cluster_pris[row_cluster_name]
else:
priority = 0
for row_node in row_cluster.node_objs():
rval[row, col] *= priority
row += 1
col += 1
normalize(rval, inplace=True)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def global_priority(self, username=None)->pd.Series:
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:return: The global priorities Series, index by node name
'''
lm = self.limit_matrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def global_priority_df(self, user_infos=None)->pd.DataFrame:
'''
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: The global priorities dataframe. Rows are the nodes and
columns are the users. The first user/column is the Group Average
'''
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
for user in user_infos:
if user is None:
uname = "Group Average"
else:
uname = user
rval[uname] = self.global_priority(user)
return rval
def limit_matrix(self, username=None, as_df=False):
'''
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param as_df: If True, returns as a dataframe with index and column
names as the names of the nodes in the network. Otherwise just
returns the array.
:return: The limit supermatrix
'''
sm = self.scaled_supermatrix(username)
rval = self.limitcalc(sm)
if not as_df:
return rval
else:
return matrix_as_df(rval, self.node_names())
def alt_names(self)->list:
'''
:return: List of alt names in this ANP model
'''
if self.has_subnet():
# We have some v1 subnetworks, we get alternative names by looking
# there.
rval = []
node: ANPNode
for node in self.node_objs_with_subnet():
alts = node.subnetwork.alt_names()
for alt in alts:
if alt not in rval:
rval.append(alt)
return rval
else:
return self.alts_cluster.node_names()
def priority(self, username=None, ptype:PriorityType=None)->pd.Series:
'''
Synthesize and return the alternative scores
:param username: If None, gets it for all users. Otherwise gets it for
the user specified. It can also be a list of users, in which case
we combine them, as per the theory.
:param ptype: The priority type to use
:return: A pandas.Series indexed on alt names, values are the score
'''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
if self.has_subnet():
# Need to synthesize using subnetworks
return self.subnet_synthesize(username=username, ptype=ptype)
else:
gp = self.global_priority(username)
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def data_names(self):
'''
Returns the column headers needed to fill in the data for this model
:return: A list of strings that would be usable in excel for parsing
headers
'''
node:ANPNode
rval = []
cluster: ANPCluster
for cluster in self.cluster_objs():
cluster.data_names(rval)
for node in self.node_objs():
node.data_names(rval)
return rval
def node_connection_matrix(self, new_mat:np.ndarray=None):
'''
Returns the current node conneciton matrix if new_mat is None.
Otherwise, for each item [row, col] in the matrix with a value of 1
we connect from node[col] to node[row] (matching node_connections()).
:param new_mat: The new node connection matrix. If None, we return
the current one.
:return: Current connection matrix.
'''
src_node:ANPNode
nnodes = self.nnodes()
nodes = self.node_objs()
node_names = self.node_names()
if new_mat is not None:
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if new_mat[dest_node_pos, src_node_pos] != 0:
src_node.node_connect(node_names[dest_node_pos])
rval = np.zeros([nnodes, nnodes])
for src_node_pos in range(nnodes):
src_node = nodes[src_node_pos]
for dest_node_pos in range(nnodes):
if src_node.is_node_node_connection(node_names[dest_node_pos]):
rval[dest_node_pos, src_node_pos] = 1
return rval
def import_pw_series(self, series:pd.Series)->None:
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer (or cluster).
The name should be A vs B wrt C, where A, B, C are node or cluster names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
wrtNode:ANPNode
wrtNode = self.node_obj(wrt)
info = info[0].split( ' vs ')
if len(info) < 2:
raise ValueError(" vs was not present in "+name)
row, col = info
rowNode = self.node_obj(row)
colNode = self.node_obj(col)
npri: Pairwise
if (wrtNode is not None) and (rowNode is not None) and (colNode is not None):
# Node pairwise
npri = wrtNode.get_node_prioritizer(rowNode, create=True)
#print("Node comparison "+name)
if not isinstance(npri, Pairwise):
raise ValueError("Node prioritizer was not pairwise")
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
else:
# Try cluster pairwise
wrtcluster = self.cluster_obj(wrt)
rowcluster = self.cluster_obj(row)
colcluster = self.cluster_obj(col)
if wrtcluster is None:
raise ValueError("wrt="+wrt+" was not a cluster, and the group was not a node comparison")
if rowcluster is None:
raise ValueError("row="+row+" was not a cluster, and the group was not a node comparison")
if colcluster is None:
raise ValueError("col="+col+" was not a cluster, and the group was not a node comparison")
npri = self.cluster_prioritizer(wrtcluster)
npri.vote_series(series, row, col, createUnknownUser=True)
self.add_user(series.index, ignore_dupe=True)
#print("Cluster comparison "+name)
def set_alts_cluster(self, new_cluster):
'''
Sets the new alternatives cluster
:param new_cluster: Cluster specified as cluster_obj() expects.
:return: Nothing
'''
cl = self.cluster_obj(new_cluster)
self.alts_cluster = cl
def import_rating_series(self, series:pd.Series):
'''
Takes in a well titled series of data, and pushes it into the right
node's prioritizer as ratings (or cluster).
Title should be A wrt B, where A and B are either both node names or
both cluster names.
:param series: The series of data for each user. Index is usernames.
Values are the votes.
:return: Nothing
'''
name = series.name
name = clean_name(name)
info = name.split(' wrt ')
if len(info) < 2:
# We cannot do anything with this, we need a wrt
raise ValueError("No wrt in "+name)
wrt = info[1].strip()
dest = info[0].strip()
wrtNode:ANPNode
destNode:ANPNode
wrtNode = self.node_obj(wrt)
destNode = self.node_obj(dest)
npri:Rating
if (wrtNode is not None) and (destNode is not None):
# Node ratings
npri = wrtNode.get_node_prioritizer(destNode, create=True, create_class=Rating)
if not isinstance(npri, Rating):
wrtNode.set_node_prioritizer_type(destNode, Rating)
npri = wrtNode.get_node_prioritizer(destNode, create=True)
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
else:
# Trying cluster ratings
wrtcluster = self.cluster_obj(wrt)
destcluster = self.cluster_obj(dest)
if wrtcluster is None:
raise ValueError("Ratings: wrt is not a cluster wrt="+wrt+" and wasn't a node either")
if destcluster is None:
raise ValueError("Ratings: dest is not a cluster dest="+dest+" and wasn't a node either")
npri = wrtcluster.prioritizer
if not isinstance(npri, Rating):
wrtcluster.set_prioritizer_type(Rating)
npri = wrtcluster.prioritizer
npri.vote_column(votes=series, alt_name=dest, createUnknownUsers=True)
def node_prioritizer(self, wrtnode=None, cluster=None):
'''
Gets the prioritizer for node->cluster connection
:param wrtnode: The node as understood by node_obj() function.
:param cluster: Cluster as understood by cluster_obj() function.
:return: If both wrtnode and cluster are specified, a single node prioritizer
is returned for that comparison (or None if there was nothing there).
Otherwise it returns a dictionary indexed by [wrtnode, cluster] and
whose values are the prioritizers for that (only the non-None ones).
'''
if wrtnode is not None and cluster is not None:
node = self.node_obj(wrtnode)
cl_obj = self.cluster_obj(cluster)
cluster_name = cl_obj.name
return node.get_node_prioritizer(dest_node=cluster_name, dest_is_cluster=True)
elif wrtnode is not None:
# Have wrtnode, do not have cluster
rval = {}
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
elif cluster is not None:
# Have cluster, but not wrtnode
rval = {}
for wrtnode in self.node_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
else:
# Both wrtnode and cluster are none, want all
rval = {}
for wrtnode in self.node_names():
for cluster in self.cluster_names():
pri = self.node_prioritizer(wrtnode, cluster)
if pri is not None:
rval[(wrtnode, cluster)] = pri
return rval
def subnet(self, wrtnode):
'''
Makes wrtnode have a subnetwork if it did not already.
:param wrtnode: The node to give a subnetwork to, or get the subnetwork
of. Node specified as node_obj() function expects.
:return: The ANPNetwork that is the subnet of this node
'''
node = self.node_obj(wrtnode)
if node.subnetwork is not None:
return node.subnetwork
else:
rval = ANPNetwork(create_alts_cluster=False)
node.subnetwork = rval
rval.default_priority_type = PriorityType.IDEALIZE
return rval
def node_invert(self, node, value=None):
'''
Either sets, or tells if a node is inverted
:param node: The node to do this on, as expected by node_obj() function
:param value: If None, we return the boolean about if this node is
inverted. Otherwise specifies the new value.
:return: T/F if value=None, telling if the node is inverted. Otherwise
returns nothing.
'''
node = self.node_obj(node)
if value is None:
return node.invert
else:
node.invert = value
def has_subnet(self)->bool:
'''
:return: True/False telling if some node has a subnetwork
'''
for node in self.node_objs():
if node.subnetwork is not None:
return True
return False
def subnet_synthesize(self, username=None, ptype:PriorityType=None):
'''
Does the standard V1 subnetwork synthesis.
:param username: The user/users to synthesize for. If None, we group
synthesize across all. If a single user, we synthesize for that user
across all. If it is a list, we synthesize for the group that is that
list of users.
:return: The synthesized scores as a pandas.Series indexed by alternative name
'''
# First we need our global priorities
pris = self.global_priority(username)
# Next we need the alternative priorities from each subnetwork
subnets = {}
node:ANPNode
for node in self.node_objs_with_subnet():
p = node.subnetwork.priority(username, ptype)
if node.invert:
p = self.invert_priority(p)
subnets[node.name]=p
rval = self.synthesize_combine(pris, subnets)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def node_objs_with_subnet(self):
"""
:return: List of ANPNode objects in this network that have v1 subnets
"""
return [node for node in self.node_objs() if node.subnetwork is not None]
def invert_priority(self, p):
"""
Makes a copy of the list-like element p, and inverts it. The current
standard inversion is 1-p. There could be others implemented later.
:param p: The list like to invert
:return: New list-like of same type as p, with inverted priorities
"""
rval = deepcopy(p)
for i in range(len(p)):
rval[i] = 1 - rval[i]
return rval
def synthesize_combine(self, priorities:pd.Series, alt_scores:dict):
"""
Performs the actual synthesis step from ANP v1 synthesis.
:param priorities: Priorities of the subnetworks
:param alt_scores: Alt scores as dictionary, keys are subnetwork names
values are Series whose keys are alt names.
:return: Series whose keys are alt names, and whose values are the
synthesized scores.
"""
return self.subnet_formula(priorities, alt_scores)
def cluster_prioritizer(self, wrtcluster=None):
"""
Gets the prioritizer for the clusters wrt a given cluster.
:param wrtcluster: WRT cluster identifier as expected by cluster_obj() function.
If None, then we return a dictionary indexed by cluster names and values
are the prioritizers
:return: The prioritizer for that cluster, or a dictionary of all cluster
prioritizers
"""
if wrtcluster is not None:
cluster = self.cluster_obj(wrtcluster)
return cluster.prioritizer
else:
rval = {}
for cluster in self.cluster_objs():
rval[cluster.name] = cluster.prioritizer
return rval
def to_excel(self, fname):
struct = pd.DataFrame()
cluster:ANPCluster
writer = pd.ExcelWriter(fname, engine='openpyxl')
for cluster in self.cluster_objs():
cluster_name = cluster.name
if cluster == self.alts_cluster:
cluster_name = "*"+str(cluster_name)
struct[cluster_name] = cluster.node_names()
struct.to_excel(writer, sheet_name="struct", index=False)
# Now the node connections
mat = self.node_connection_matrix()
pd.DataFrame(mat).to_excel(writer, sheet_name="connection", index=False, header=False)
# Lastly let's write just the comparison structure
cmp = self.data_names()
pd.DataFrame({"":cmp}).to_excel(writer, sheet_name="votes", index=False, header=True)
writer.save()
writer.close()
def cluster_incon_std_df(self, user_infos=None) -> pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are clusters, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful
for cluster, pw in self.cluster_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[cluster] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(
lambda x: x if x is not None else "Group Average")
return rval
def node_incon_std_df(self, user_infos=None)->pd.DataFrame:
"""
:param user_infos: A list of users to do this for, if None is a part
of this list, it means group average. If None, it defaults to
None plus all users.
:return: DataFrame whose columns are (node,cluster) pairs, rows
are users (as controlled by user_infos params) and the value is
the inconsistency for the given user on the given comparison.
"""
if user_infos is None:
user_infos = list(self.user_names())
user_infos.insert(0, None)
rval = pd.DataFrame()
# We need the name for the group (i.e. None) to be something useful
for info, pw in self.node_prioritizer().items():
if isinstance(pw, Pairwise):
incon = [pw.incon_std(user) for user in user_infos]
rval[info] = pd.Series(incon, index=user_infos)
if None in rval.index:
rval = rval.rename(lambda x: x if x is not None else "Group Average")
return rval
def set_pairwise_from_supermatrix(self, mat, username="Imported"):
"""
Sets up all pairwise comparisons from supermatrix
:param mat: As numpy array
:return: Nothing
"""
node_names = self.node_names()
nnodes = len(node_names)
## Handle node pairwise comparisons first
for wrtnode_pos in range(nnodes):
wrtnode = node_names[wrtnode_pos]
offset=0
cluster_offsets = []
for cluster in self.cluster_names():
cluster_nodes = self.node_names(cluster)
npri:Pairwise
npri = self.node_prioritizer(wrtnode, cluster)
if npri is not None and isinstance(npri, Pairwise):
nclusternodes=len(cluster_nodes)
for node_row_pos in range(nclusternodes):
for node_col_pos in range(node_row_pos+1, nclusternodes):
rownode = cluster_nodes[node_row_pos]
colnode = cluster_nodes[node_col_pos]
vr = mat[offset+node_row_pos, wrtnode_pos]
vc = mat[offset+node_col_pos, wrtnode_pos]
#print("wrt="+wrtnode+" "+str(vr)+", "+str(vc)+": "+rownode+", "+colnode)
if vr!=0 and vc!= 0:
val = vr/vc
npri.vote(username, rownode, colnode, val, createUnknownUser=True)
cluster_offsets.append(range(offset, offset+len(cluster_nodes)))
offset+=len(cluster_nodes)
## Handle cluster pairwise comparisons now
cluster_names = self.cluster_names()
nclusters = len(cluster_names)
for wrt_cluster_pos in range(nclusters):
node_range = cluster_offsets[wrt_cluster_pos]
matrix_cols:np.ndarray
matrix_cols = mat[:,node_range]
avg_cols = matrix_cols.mean(axis=1)
cluster_pris = np.array([0.0]*nclusters)
for other_cluster_pos in range(nclusters):
cluster_pris[other_cluster_pos]=0
for node_pos in cluster_offsets[other_cluster_pos]:
cluster_pris[other_cluster_pos]+=avg_cols[node_pos]
#Now we have cluster priorities, now we can compare
cpri:Pairwise
cpri = self.cluster_obj(wrt_cluster_pos).prioritizer
for row_cluster_pos in range(nclusters):
for col_cluster_pos in range(row_cluster_pos+1, nclusters):
rowcluster = cluster_names[row_cluster_pos]
colcluster = cluster_names[col_cluster_pos]
vr = cluster_pris[row_cluster_pos]
vc = cluster_pris[col_cluster_pos]
if vr!=0 and vc!=0:
val = vr/vc
cpri.vote(username, rowcluster, colcluster, val, createUnknownUser=True)
def unscaled_structurematrix(self, username=None, as_df=False, add_self_connections=False):
rval = self.unscaled_supermatrix(username=username)
for row in rval:
for i in range(len(row)):
if row[i] != 0:
row[i] = 1
if add_self_connections:
for i in range(len(rval)):
row = rval[i]
if len(row) > i:
row[i] = 1
return rval
def scaled_structurematrix(self, username=None, as_df=False):
rval = self.unscaled_structurematrix(username=username, as_df=False)
normalize(rval, inplace=True)
return self._node_matrix_as_df(rval, as_df)
def limit_structurematrix(self, username=None, as_df=False):
        rval = self.scaled_structurematrix(username=username, as_df=False)
rval = self.limitcalc(rval)
return self._node_matrix_as_df(rval, as_df)
def structure_global_priority(self, username=None):
lm = self.limit_structurematrix(username)
rval = priority_from_limit(lm)
node_names = self.node_names()
return pd.Series(data=rval, index=node_names)
def _node_matrix_as_df(self, matrix, as_df=False):
if not as_df:
return matrix
else:
return matrix_as_df(matrix, self.node_names())
def structure_priority(self, username=None, ptype:PriorityType=None, alt_names=None)->pd.Series:
        '''
        Computes the structure-based priorities of the alternatives.
        :param username: The user to compute for; None means the group average.
        :param ptype: How to rescale the resulting priorities; if None, the
            network's default_priority_type is used.
        :param alt_names: The alternatives to report; defaults to self.alt_names().
        :return: A pandas.Series of priorities indexed by alternative name.
        '''
if ptype is None:
# Use the default priority type for this network
ptype = self.default_priority_type
gp = self.structure_global_priority(username)
if alt_names is None:
alt_names = self.alt_names()
rval = gp[alt_names]
if sum(rval) != 0:
rval /= sum(rval)
if ptype is not None:
rval = ptype.apply(rval)
return rval
def structure_cluster_priority(self, username=None, ptype:PriorityType=None, mean=False)->pd.Series:
gp = self.structure_global_priority(username)
cluster_names = self.cluster_names()
nclusters = self.nclusters()
rval = pd.Series(data=[0.0]*nclusters, index=cluster_names)
for cluster in cluster_names:
count=0
for node in self.node_names(cluster):
rval[cluster]+=gp[node]
count+=1
if mean and count > 0:
rval[cluster]/=count
return rval
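# A minimal usage sketch, not part of the original module: "model.xlsx" is a
# hypothetical workbook name and anp_from_excel is defined further down in this
# file. It shows how the structure-based priority methods above are typically used.
def _demo_structure_priorities():
    net = anp_from_excel("model.xlsx")
    alt_scores = net.structure_priority()                        # priorities of the alternatives
    cluster_scores = net.structure_cluster_priority(mean=True)   # average priority per cluster
    return alt_scores, cluster_scores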
__PW_COL_REGEX = re.compile(r'\s+vs\s+.+\s+wrt\s+')
def is_pw_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a pairwise
comparison, i.e. A vs B wrt C
:param col: The title of the column to check
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
else:
return __PW_COL_REGEX.search(col) is not None
__RATING_COL_REGEX = re.compile(r'\s+wrt\s+')
def is_rating_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a rating
column of data, i.e. A wrt B
:param col: The name of the column
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
elif is_pw_col_name(col):
return False
else:
return __RATING_COL_REGEX.search(col) is not None
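# A small sketch, not part of the original module, of the column naming
# conventions the two helpers above recognise; the column titles are made up.
def _demo_column_name_conventions():
    assert is_pw_col_name("NodeA vs NodeB wrt Goal")          # pairwise comparison column
    assert not is_pw_col_name("NodeA wrt Goal")
    assert is_rating_col_name("NodeA wrt Goal")               # rating column
    assert not is_rating_col_name("NodeA vs NodeB wrt Goal")  # pairwise, so not a rating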
def anp_manual_scales_from_excel(anp:ANPNetwork, excel_fname):
"""
Parses manual rating scales from an Excel file
:param anp: The model to put the scale values in.
:param excel_fname: The string file name of the excel file with the data
:return: Nothing
"""
xl = pd.ExcelFile(excel_fname)
if "scales" not in xl.sheet_names:
# We have no scales, do nothing
return
# Scales exist, read in
df = xl.parse(sheet_name="scales")
for scale_info in df:
# See if it has a wrt and whatnot
pieces = scale_info.split(" wrt ")
if len(pieces) == 2:
# Found one
cluster = pieces[0].strip()
wrtnode = pieces[1].strip()
scale_data = {}
for item in df[scale_info]:
name, val = str(item).split("=")
name = name.lower().strip()
val = float(val)
scale_data[name]=[val]
rating:Rating
rating = anp.node_prioritizer(wrtnode, cluster)
#print(scale_data)
rating.set_word_eval(scale_data)
# We are done!
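# A hedged sketch, not part of the original module, of the cell format the
# "scales" sheet parser above expects: a column titled "Cluster wrt Node" whose
# cells look like "word=value". The words and values here are made up.
def _demo_scale_cell_parsing():
    scale_data = {}
    for item in ["High=3", "Medium=2", "Low=1"]:
        name, val = str(item).split("=")
        scale_data[name.lower().strip()] = [float(val)]
    return scale_data    # {'high': [3.0], 'medium': [2.0], 'low': [1.0]}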
def anp_from_excel(excel_fname:str)->ANPNetwork:
"""
Parses an excel file to get an ANPNetwork
:param excel_fname: The name of the excel file
:return: The newly created ANPNetwork object
"""
## Structure first
df = pd.read_excel(excel_fname, sheet_name=0)
anp = ANPNetwork(create_alts_cluster=False)
for col in df:
if col.startswith("*"):
is_alt = True
            cname = col[1:]
else:
is_alt = False
cname = col
anp.add_cluster(cname)
anp.add_node(cname, df[col])
if is_alt:
anp.set_alts_cluster(cname)
    ## Now connection data
conn_mat = get_matrix(excel_fname, sheet=1)
#print(conn_mat)
#print(conn_mat.shape)
anp.node_connection_matrix(conn_mat)
#If the matrix is full of floating points, we assume it is a scaled supermatrix
if conn_mat.dtype == np.dtype('float'):
anp.set_pairwise_from_supermatrix(conn_mat)
## Now pairwise data
xl = pd.ExcelFile(excel_fname)
if len(xl.sheet_names) <= 2:
# No pairwise data, please stop
return anp
    df = pd.read_excel(excel_fname, sheet_name=2)
'''
Author: <NAME>
@License: MIT, See License.txt at root of project.
Method calls to this object prepare data for the model and save/load the scale used to train the model.
The same scale must be used to standardise data used for predictions, which is why persisting it is important. There is also a method
for performing PCA analysis on the data.
'''
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import train_test_split #splitting up data for testing
from sklearn import preprocessing #standardisation of data
from sklearn.externals import joblib #saving/loading the scale used for standardising the data during training (scikit-learn >= 0.23 removed this; use "import joblib" instead)
from sklearn.decomposition import PCA #principal component analysis
class Standardiser:
def __init__(self):
pass
#load in data, then standardise and split it for training/testing.
def initialise(self):
print("standardiser initialising")
self.data = self.loadData()
self.X_train, self.X_test, self.y_train, self.y_test = self.splitData(self.data)
self.std_scale, self.X_train_std, self.X_test_std = self.standardise(self.X_train, self.X_test, self.y_train, self.y_test)
    #getter functions for retrieving split standardised data
def get_std_X_train(self):
return self.X_train_std
def get_std_X_test(self):
return self.X_test_std
def get_y_train(self):
return self.y_train
def get_y_test(self):
return self.y_test
#read in the data as a pandas dataframe. ignore the first row (headers) and only use columns PCA deemed good for training on.
def loadData(self):
df = pd.io.parsers.read_csv(
'Data/NewBalanced.csv',
header=None,
skiprows = [0],
usecols=[5,10,15,17,18,19,20,22])
return df
#loads in Darkskies weather predictions for the coming week.
def loadForecast(self,forecast_loc):
#load in forecast csv
self.foredf = pd.io.parsers.read_csv(
forecast_loc,
header=None,
skiprows = [0],
usecols=[1,2,3,4,5,6,7,8,9])
X_forecast = self.foredf.values[:,3:]
X_forecast = self.standardise_Pred(X_forecast)
return X_forecast
#split data into training and testing samples
def splitData(self, data):
X = data.values[:,:7]
y = data.values[:,7]
#split the data into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.30, random_state=random.randint(10,100000))
return X_train, X_test, y_train, y_test
#standardise the data and save the scale it was standardised on.
def standardise(self, X_train, X_test, y_train, y_test):
#standardisation using sklearn
self.std_scale = preprocessing.StandardScaler().fit(X_train)
X_train_std = self.std_scale.transform(X_train)
X_test_std = self.std_scale.transform(X_test)
self.saveScale()
return self.std_scale, X_train_std, X_test_std
    #standardises data read in for prediction
def standardise_Pred(self, X_forecast):
X_forecast_std = self.std_scale.transform(X_forecast)
return X_forecast_std
    #performs PCA on the training data.
def PCAan(self, X_train_std, X_test_std, y_train):
pca_std = PCA(n_components=2).fit(X_train_std)
X_train_std = pca_std.transform(X_train_std)
X_test_std = pca_std.transform(X_test_std)
return pca_std, X_train_std, X_test_std
    #appends forecast predictions (both class and probability based) to the SVM input CSV and then produces a new SVM output CSV
def make_CSV(self, fore_pred, fore_prob,outputfile):
print("make_CSV")
forearray = self.foredf.values.tolist()
i = 0
for element in forearray:
element.append(fore_pred[i])
element.append(fore_prob[i][1])
i +=1
        df = pd.DataFrame(forearray)
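# A hedged usage sketch, not part of the original class: it assumes the CSV
# paths used above exist and that class/probability predictions come from an
# SVM trained elsewhere. It only illustrates the intended order of calls.
def _demo_standardiser_flow():
    std = Standardiser()
    std.initialise()                                    # load, split and fit the scaler
    X_train, y_train = std.get_std_X_train(), std.get_y_train()
    X_test, y_test = std.get_std_X_test(), std.get_y_test()
    X_forecast = std.loadForecast('Data/forecast.csv')  # hypothetical forecast file
    return X_train, y_train, X_test, y_test, X_forecast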
import pandas as pd
import numpy as np
# import tensorflow as tf
import tensorflow.compat.v1 as tf
def data_prepare():
tf.disable_v2_behavior()
ratings_df = pd.read_csv('./ml-latest-small/ratings.csv')
ratings_df.tail()
movies_df = pd.read_csv('./ml-latest-small/movies.csv')
movies_df.tail()
movies_df['movieRow'] = movies_df.index
movies_df.tail()
movies_df = movies_df[['movieRow', 'movieId', 'title']]
movies_df.to_csv('./ml-latest-small/moviesProcessed.csv', index=False, header=True, encoding='utf-8')
movies_df.tail()
    ratings_df = pd.merge(ratings_df, movies_df, on='movieId')
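# A toy sketch (made-up frames, not the MovieLens files) of what the merge above
# produces: every rating row gains the movieRow and title columns of its movie.
def _demo_merge_on_movie_id():
    ratings = pd.DataFrame({'userId': [1, 1], 'movieId': [10, 20], 'rating': [4.0, 3.5]})
    movies = pd.DataFrame({'movieRow': [0, 1], 'movieId': [10, 20], 'title': ['A', 'B']})
    return pd.merge(ratings, movies, on='movieId')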