prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
from sktime.transformations.panel.rocket import MiniRocket as MiniRKT
from sktime.classification.shapelet_based import MrSEQLClassifier
from convst.utils import load_sktime_arff_file_resample_id, return_all_dataset_names, UCR_stratified_resample
from convst.transformers import ConvolutionalShapeletTransformer
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_validate
from sklearn.metrics import f1_score, make_scorer
from sklearn.metrics import accuracy_score
from wildboar.ensemble import ShapeletForestClassifier
from numba import set_num_threads
#Can use this to resume from the last dataset if a problem occurred
resume = False
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Modify this to your path to the UCR resamples, check README for how to get them.
# Another splitter is also provided in dataset_utils to make random resamples
base_UCR_resamples_path = r"/home/prof/guillaume/Shapelets/resamples/"
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print("Imports OK")
#Set n_cv = 1 to test on the original train/test split.
n_cv=30
n_trees=200
max_ft=1.0
P=80
n_bins=11
random_state = None
run_RKT = True
run_CST = True
run_MrSEQL = True
run_SFC = True
#Machine parameters, change these to match your machine.
available_memory_bytes = 60 * 1e9
n_cores = 32
def get_n_jobs_n_threads(nbytes, size_mult=3000):
nbytes *= size_mult
n_jobs = min(max(available_memory_bytes//nbytes,1),n_cv//2)
n_threads = min(max(n_cores//n_jobs,1),n_cores//2)
return int(n_jobs), int(n_threads)
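# A minimal sketch (not called anywhere in this benchmark) of how the imported pieces are
# typically combined: MiniRocket features feed a RidgeClassifierCV inside a sklearn
# pipeline evaluated with cross_validate. X, y and the cross-validation splitter cv are
# assumed to come from the UCR loading utilities imported above.
def example_rocket_pipeline(X, y, cv):
    pipeline = make_pipeline(
        MiniRKT(),
        RidgeClassifierCV(alphas=np.logspace(-6, 6, 20))
    )
    scorers = {
        'accuracy': make_scorer(accuracy_score),
        'f1': make_scorer(f1_score, average='macro'),
    }
    return cross_validate(pipeline, X, y, cv=cv, scoring=scorers)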
csv_name = 'CV_{}_results_({},{})_{}_{}.csv'.format(
n_cv, n_trees, max_ft, n_bins, P)
dataset_names = return_all_dataset_names()
if resume:
df = pd.read_csv(csv_name)
df = df.set_index('Unnamed: 0')
df = df.drop(df.index[np.where(~df.index.isin(dataset_names))[0]], axis=0)
df.to_csv(csv_name)
else:
df = pd.DataFrame(index=dataset_names)
df['CST_f1_mean'] = pd.Series(0, index=df.index)
df['CST_f1_std'] =
| pd.Series(0, index=df.index) | pandas.Series |
# Original implementation by <NAME> can be found using the following link: https://github.com/ryansmcgee/seirsplus
# Copyright (c) 2020 by <NAME>, <NAME>, BIOMATH, Ghent University. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
import pandas as pd
from random import choices
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
from scipy import interpolate as inter
import copy
import pso
import multiprocessing
# set color schemes
#From Color Universal Design (CUD): https://jfly.uni-koeln.de/color/
orange = "#E69F00"
light_blue = "#56B4E9"
green = "#009E73"
yellow = "#F0E442"
blue = "#0072B2"
red = "#D55E00"
pink = "#CC79A7"
black = "#000000"
Okabe_Ito = (orange, light_blue, green, yellow, blue, red, pink, black)
plt.rcParams["axes.prop_cycle"] = matplotlib.cycler('color', Okabe_Ito)
# increase font sizes
# the code below is not wrong, but kinda annoying if you continuously import
# this model in a notebook using the load_ext magic
#multiplier = 1.5
#keys = ("font.size", )
#for key in keys:
# plt.rcParams[key] *= multiplier
plt.rcParams["font.size"] = 15
plt.rcParams["lines.linewidth"] = 3
class SEIRSAgeModel():
"""
A class to simulate the Deterministic extended SEIRS Model with optional age-structuring
=======================================================================================
Params:
"""
def __init__(self, initN, beta, sigma, Nc=0, zeta=0,sm=0,m=0,h=0,c=0,dsm=0,dm=0,dhospital=0,dh=0,dcf=0,dcr=0,mc0=0,ICU=0,totalTests=0,
psi_FP=0,psi_PP=0,dq=14,initE=0,initSM=0,initM=0,initH=0,initC=0,initHH=0,initCH=0,initR=0,
initF=0,initSQ=0,initEQ=0,initSMQ=0,initMQ=0,initRQ=0,monteCarlo=False,n_samples=1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Clinical parameters
self.beta = beta
self.sigma = sigma
self.Nc = Nc
self.zeta = zeta
self.sm = sm
self.m = m
self.h = h
self.c = c
self.dsm = dsm
self.dm = dm
self.dhospital = dhospital
self.dh = dh
self.dcf = dcf
self.dcr = dcr
self.mc0 = mc0
self.ICU = ICU
# Testing-related parameters:
self.totalTests = totalTests
self.psi_FP = psi_FP
self.psi_PP = psi_PP
self.dq = dq
# monte-carlo sampling is an attribute of the model
self.monteCarlo = monteCarlo
self.n_samples = n_samples
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Reshape initial condition into Nc.shape[0] x 1 2D arrays:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# initial conditions must be attributes of the class (this was not done in the original implementation)
self.initN = numpy.reshape(initN,[Nc.shape[0],1])
self.initE = numpy.reshape(initE,[Nc.shape[0],1])
self.initSM = numpy.reshape(initSM,[Nc.shape[0],1])
self.initM = numpy.reshape(initM,[Nc.shape[0],1])
self.initH = numpy.reshape(initH,[Nc.shape[0],1])
self.initC = numpy.reshape(initC,[Nc.shape[0],1])
self.initHH = numpy.reshape(initHH,[Nc.shape[0],1])
self.initCH = numpy.reshape(initCH,[Nc.shape[0],1])
self.initR = numpy.reshape(initR,[Nc.shape[0],1])
self.initF = numpy.reshape(initF,[Nc.shape[0],1])
self.initSQ = numpy.reshape(initSQ,[Nc.shape[0],1])
self.initEQ = numpy.reshape(initEQ,[Nc.shape[0],1])
self.initSMQ = numpy.reshape(initSMQ,[Nc.shape[0],1])
self.initMQ = numpy.reshape(initMQ,[Nc.shape[0],1])
self.initRQ = numpy.reshape(initRQ,[Nc.shape[0],1])
#~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# per age category:
self.N = self.initN.astype(int)
self.numE = self.initE.astype(int)
self.numSM = self.initSM.astype(int)
self.numM = self.initM.astype(int)
self.numH = self.initH.astype(int)
self.numC = self.initC.astype(int)
self.numHH = self.initHH.astype(int)
self.numCH = self.initCH.astype(int)
self.numR = self.initR.astype(int)
self.numF = self.initF.astype(int)
self.numSQ = self.initSQ.astype(int)
self.numEQ = self.initEQ.astype(int)
self.numSMQ = self.initSMQ.astype(int)
self.numMQ = self.initMQ.astype(int)
self.numRQ = self.initRQ.astype(int)
self.numS = (numpy.reshape(self.N[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numE[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numSM[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numM[:,-1],[Nc.shape[0],1])
- numpy.reshape(self.numH[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numC[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numHH[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numCH[:,-1],[Nc.shape[0],1])
- numpy.reshape(self.numR[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numF[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numSQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numEQ[:,-1],[Nc.shape[0],1])
- numpy.reshape(self.numSMQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numMQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numRQ[:,-1],[Nc.shape[0],1]))
def reset(self):
Nc = self.Nc
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Reset Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Reshape initial condition into Nc.shape[0] x 1 2D arrays:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.initN = numpy.reshape(self.initN,[self.Nc.shape[0],1])
self.initE = numpy.reshape(self.initE,[self.Nc.shape[0],1])
self.initSM = numpy.reshape(self.initSM,[self.Nc.shape[0],1])
self.initM = numpy.reshape(self.initM,[self.Nc.shape[0],1])
self.initH = numpy.reshape(self.initH,[self.Nc.shape[0],1])
self.initC = numpy.reshape(self.initC,[self.Nc.shape[0],1])
self.initHH = numpy.reshape(self.initHH,[self.Nc.shape[0],1])
self.initCH = numpy.reshape(self.initCH,[self.Nc.shape[0],1])
self.initR = numpy.reshape(self.initR,[self.Nc.shape[0],1])
self.initF = numpy.reshape(self.initF,[self.Nc.shape[0],1])
self.initSQ = numpy.reshape(self.initSQ,[self.Nc.shape[0],1])
self.initEQ = numpy.reshape(self.initEQ,[self.Nc.shape[0],1])
self.initSMQ = numpy.reshape(self.initSMQ,[self.Nc.shape[0],1])
self.initMQ = numpy.reshape(self.initMQ,[self.Nc.shape[0],1])
self.initRQ = numpy.reshape(self.initRQ,[self.Nc.shape[0],1])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# per age category:
self.N = self.initN.astype(int)
self.numE = self.initE.astype(int)
self.numSM = self.initSM.astype(int)
self.numM = self.initM.astype(int)
self.numH = self.initH.astype(int)
self.numC = self.initC.astype(int)
self.numHH = self.initHH.astype(int)
self.numCH = self.initCH.astype(int)
self.numR = self.initR.astype(int)
self.numF = self.initF.astype(int)
self.numSQ = self.initSQ.astype(int)
self.numEQ = self.initEQ.astype(int)
self.numSMQ = self.initSMQ.astype(int)
self.numMQ = self.initMQ.astype(int)
self.numRQ = self.initRQ.astype(int)
self.numS = numpy.reshape(self.N[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numE[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numSM[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numM[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numH[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numC[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numHH[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numCH[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numR[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numF[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numSQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numEQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numSMQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numMQ[:,-1],[Nc.shape[0],1]) - numpy.reshape(self.numRQ[:,-1],[Nc.shape[0],1])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, Nc, zeta, sm, m, h, c, dsm, dm, dhospital, dh, dcf, dcr, mc0, ICU, totalTests, psi_FP, psi_PP, dq):
# input is a 1D-array
# first extract the separate variables from the 1D array
S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ = variables.reshape(15,Nc.shape[0])
# reshape all variables to a Nc.shape[0]x1 2D-array
S = numpy.reshape(S,[Nc.shape[0],1])
E = numpy.reshape(E,[Nc.shape[0],1])
SM = numpy.reshape(SM,[Nc.shape[0],1])
M = numpy.reshape(M,[Nc.shape[0],1])
H = numpy.reshape(H,[Nc.shape[0],1])
C = numpy.reshape(C,[Nc.shape[0],1])
HH = numpy.reshape(HH,[Nc.shape[0],1])
CH = numpy.reshape(CH,[Nc.shape[0],1])
R = numpy.reshape(R,[Nc.shape[0],1])
F = numpy.reshape(F,[Nc.shape[0],1])
SQ = numpy.reshape(SQ,[Nc.shape[0],1])
EQ = numpy.reshape(EQ,[Nc.shape[0],1])
SMQ = numpy.reshape(SMQ,[Nc.shape[0],1])
MQ = numpy.reshape(MQ,[Nc.shape[0],1])
RQ = numpy.reshape(RQ,[Nc.shape[0],1])
# calculate total population per age bin using 2D array
N = S + E + SM + M + H + C + HH + CH + R + SQ + EQ + SMQ + MQ + RQ
# calculate the test rates for each pool using the total number of available tests
nT = S + E + SM + M + R
theta_S = totalTests/nT
theta_S[theta_S > 1] = 1
theta_E = totalTests/nT
theta_E[theta_E > 1] = 1
theta_SM = totalTests/nT
theta_SM[theta_SM > 1] = 1
theta_M = totalTests/nT
theta_M[theta_M > 1] = 1
theta_R = totalTests/nT
theta_R[theta_R > 1] = 1
# calculate rates of change using the 2D arrays
dS = - beta*numpy.matmul(Nc,((E+SM)/N)*S) - theta_S*psi_FP*S + SQ/dq + zeta*R
dE = beta*numpy.matmul(Nc,((E+SM)/N)*S) - E/sigma - theta_E*psi_PP*E
dSM = sm/sigma*E - SM/dsm - theta_SM*psi_PP*SM
dM = m/sigma*E - M/dm - theta_M*psi_PP*M
dH = h/sigma*E - H/dhospital + h/sigma*EQ
dC = c/sigma*E - C/dhospital + c/sigma*EQ
dHH = H/dhospital - HH/dh
dCH = C/dhospital - mc0*CH/dcf - (1-mc0)*CH/dcr
dR = SM/dsm + M/dm + HH/dh + (1-mc0)*CH/dcr + SMQ/dsm + MQ/dm + RQ/dq - zeta*R
dF = mc0*CH/dcf
dSQ = theta_S*psi_FP*S - SQ/dq
dEQ = theta_E*psi_PP*E - EQ/sigma
dSMQ = theta_SM*psi_PP*SM + sm/sigma*EQ - SMQ/dsm
dMQ = theta_M*psi_PP*M + m/sigma*EQ - MQ/dm
dRQ = theta_R*psi_FP*R - RQ/dq
# reshape output back into a 1D array of similar dimension as input
out = numpy.array([dS,dE,dSM,dM,dH,dC,dHH,dCH,dR,dF,dSQ,dEQ,dSMQ,dMQ,dRQ])
out = numpy.reshape(out,15*Nc.shape[0])
return out
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t+1, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = numpy.array([self.numS[:,-1], self.numE[:,-1], self.numSM[:,-1], self.numM[:,-1], self.numH[:,-1], self.numC[:,-1], self.numHH[:,-1], self.numCH[:,-1], self.numR[:,-1], self.numF[:,-1], self.numSQ[:,-1], self.numEQ[:,-1], self.numSMQ[:,-1], self.numMQ[:,-1], self.numRQ[:,-1]])
init_cond = numpy.reshape(init_cond,15*self.Nc.shape[0])
#init_cond = [self.numS[-1], self.numE[-1], self.numSM[-1], self.numM[-1], self.numH[-1], self.numC[-1], self.numHH[-1], self.numCH[-1],self.numR[-1], self.numF[-1], self.numSQ[-1],self.numEQ[-1], self.numSMQ[-1], self.numMQ[-1], self.numRQ[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSAgeModel.system_dfes(t, X, self.beta, self.sigma, self.Nc, self.zeta, self.sm, self.m, self.h, self.c, self.dsm,self.dm,
self.dhospital,self.dh,self.dcf,self.dcr,self.mc0,self.ICU,self.totalTests,self.psi_FP,self.psi_PP,self.dq),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
# output of size (nTimesteps * Nc.shape[0])
S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ = numpy.split(numpy.transpose(solution['y']),15,axis=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# transpose before appending
# append per category:
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, numpy.transpose(S),axis=1)
self.numE = numpy.append(self.numE, numpy.transpose(E),axis=1)
self.numSM = numpy.append(self.numSM, numpy.transpose(SM),axis=1)
self.numM = numpy.append(self.numM, numpy.transpose(M),axis=1)
self.numH = numpy.append(self.numH, numpy.transpose(H),axis=1)
self.numC = numpy.append(self.numC, numpy.transpose(C),axis=1)
self.numHH = numpy.append(self.numHH, numpy.transpose(HH),axis=1)
self.numCH = numpy.append(self.numCH, numpy.transpose(CH),axis=1)
self.numR = numpy.append(self.numR, numpy.transpose(R),axis=1)
self.numF = numpy.append(self.numF, numpy.transpose(F),axis=1)
self.numSQ = numpy.append(self.numSQ, numpy.transpose(SQ),axis=1)
self.numEQ = numpy.append(self.numEQ, numpy.transpose(EQ),axis=1)
self.numSMQ = numpy.append(self.numSMQ, numpy.transpose(SMQ),axis=1)
self.numMQ = numpy.append(self.numMQ, numpy.transpose(MQ),axis=1)
self.numRQ = numpy.append(self.numRQ, numpy.transpose(RQ),axis=1)
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints, dt=1, verbose=False):
#def run(self, T, dt=1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T + 1
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'Nc', 'zeta', 'sm', 'm', 'h', 'c','dsm','dm','dhospital','dh','dcf','dcr','mc0','ICU','totalTests',
'psi_FP','psi_PP','dq']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
# Before using checkpoints, save variables to be changed by method
beforeChk=[]
for key in checkpoints.keys():
if key != 't':
beforeChk.append(getattr(self,key))
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[:,-1]))
print("\t E = " + str(self.numE[:,-1]))
print("\t SM = " + str(self.numSM[:,-1]))
print("\t M = " + str(self.numM[:,-1]))
print("\t H = " + str(self.numH[:,-1]))
print("\t C = " + str(self.numC[:,-1]))
print("\t HH = " + str(self.numHH[:,-1]))
print("\t CH = " + str(self.numCH[:,-1]))
print("\t R = " + str(self.numR[:,-1]))
print("\t F = " + str(self.numF[:,-1]))
print("\t SQ = " + str(self.numSQ[:,-1]))
print("\t EQ = " + str(self.numEQ[:,-1]))
print("\t SMQ = " + str(self.numSMQ[:,-1]))
print("\t MQ = " + str(self.numMQ[:,-1]))
print("\t RQ = " + str(self.numRQ[:,-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
#print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[:,-1]))
print("\t E = " + str(self.numE[:,-1]))
print("\t SM = " + str(self.numSM[:,-1]))
print("\t M = " + str(self.numM[:,-1]))
print("\t H = " + str(self.numH[:,-1]))
print("\t C = " + str(self.numC[:,-1]))
print("\t HH = " + str(self.numHH[:,-1]))
print("\t CH = " + str(self.numCH[:,-1]))
print("\t R = " + str(self.numR[:,-1]))
print("\t F = " + str(self.numF[:,-1]))
print("\t SQ = " + str(self.numSQ[:,-1]))
print("\t EQ = " + str(self.numEQ[:,-1]))
print("\t SMQ = " + str(self.numSMQ[:,-1]))
print("\t MQ = " + str(self.numMQ[:,-1]))
print("\t RQ = " + str(self.numRQ[:,-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
# Reset all parameter values that were changed back to their original value
i = 0
for key in checkpoints.keys():
if key != 't':
setattr(self,key,beforeChk[i])
i = i+1
return self
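# Note on checkpoints: the checkpoints dict needs a 't' key listing the checkpoint times;
# any of the parameter names above can additionally be given as a list of the same length,
# holding the value to switch to at each checkpoint. Parameters that are omitted (or
# malformed) keep their current value, and all modified parameters are restored to their
# pre-run values once the simulation finishes.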
def sim(self, T, dt=1, checkpoints=None, verbose=False):
tN = int(T) + 1
if self.monteCarlo==False:
sigmavect = numpy.array([5.2])
self.n_samples = 1
else:
if self.n_samples == 1:
self.n_samples = 100
# sample a total of n_samples from the distribution of the incubation period
sigmavect = self.sampleFromDistribution('../data/corona_incubatie_data.csv',self.n_samples)
# pre-allocate a 3D matrix for the raw results
self.S = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.E = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.SM = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.M = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.H = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.C = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.HH = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.CH = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.R = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.F = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.SQ = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.EQ = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.SMQ = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.MQ = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
self.RQ = numpy.zeros([self.Nc.shape[0],tN,self.n_samples])
# pre-allocate a 2D matrix for the results summed over all age bins
self.sumS = numpy.zeros([tN,self.n_samples])
self.sumE = numpy.zeros([tN,self.n_samples])
self.sumSM = numpy.zeros([tN,self.n_samples])
self.sumM = numpy.zeros([tN,self.n_samples])
self.sumH = numpy.zeros([tN,self.n_samples])
self.sumC = numpy.zeros([tN,self.n_samples])
self.sumHH = numpy.zeros([tN,self.n_samples])
self.sumCH = numpy.zeros([tN,self.n_samples])
self.sumR = numpy.zeros([tN,self.n_samples])
self.sumF = numpy.zeros([tN,self.n_samples])
self.sumSQ = numpy.zeros([tN,self.n_samples])
self.sumEQ = numpy.zeros([tN,self.n_samples])
self.sumSMQ = numpy.zeros([tN,self.n_samples])
self.sumMQ = numpy.zeros([tN,self.n_samples])
self.sumRQ = numpy.zeros([tN,self.n_samples])
# simulation loop
i=0
for self.sigma in sigmavect:
# reset self to initial condition
self.reset()
# perform simulation
self.run(int(T),checkpoints)
# append raw results to 3D matrix
self.S[:,:,i] = self.numS
self.E[:,:,i] = self.numE
self.SM[:,:,i] = self.numSM
self.M[:,:,i] = self.numM
self.H[:,:,i] = self.numH
self.C[:,:,i] = self.numC
self.HH[:,:,i] = self.numHH
self.CH[:,:,i] = self.numCH
self.R[:,:,i] = self.numR
self.F[:,:,i] = self.numF
self.SQ[:,:,i] = self.numSQ
self.EQ[:,:,i] = self.numEQ
self.SMQ[:,:,i] = self.numSMQ
self.MQ[:,:,i] = self.numMQ
self.RQ[:,:,i] = self.numRQ
# convert raw results to sums of all age categories
self.sumS[:,i] = self.numS.sum(axis=0)
self.sumE[:,i] = self.numE.sum(axis=0)
self.sumSM[:,i] = self.numSM.sum(axis=0)
self.sumM[:,i] = self.numM.sum(axis=0)
self.sumH[:,i] = self.numH.sum(axis=0)
self.sumC[:,i] = self.numC.sum(axis=0)
self.sumHH[:,i] = self.numHH.sum(axis=0)
self.sumCH[:,i] = self.numCH.sum(axis=0)
self.sumR[:,i] = self.numR.sum(axis=0)
self.sumF[:,i] = self.numF.sum(axis=0)
self.sumSQ[:,i] = self.numSQ.sum(axis=0)
self.sumEQ[:,i] = self.numEQ.sum(axis=0)
self.sumSMQ[:,i] = self.numSMQ.sum(axis=0)
self.sumMQ[:,i] = self.numMQ.sum(axis=0)
self.sumRQ[:,i] = self.numRQ.sum(axis=0)
i = i + 1
return self
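# Note on sim(): when monteCarlo is False a single incubation parameter sigma = 5.2 is
# used; otherwise n_samples values are drawn via sampleFromDistribution. For every sigma
# the model is reset, integrated with run(), and the per-age-bin trajectories are stored
# in the 3D arrays self.S ... self.RQ (shape: n_age_bins x time x n_samples), with
# age-summed totals in self.sumS ... self.sumRQ.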
def sampleFromDistribution(self,filename,k):
df =
| pd.read_csv(filename) | pandas.read_csv |
import pandas as pd
import numpy as np
import h5py
from tqdm import tqdm
class PredExpr:
def __init__(self, fn):
self.fn = fn
with h5py.File(self.fn, 'r') as f:
tmp = f['samples'][:].astype(str)
self.samples = pd.DataFrame({
'eid': tmp,
'idx': [ i for i in range(len(tmp)) ]
})
tmp = f['genes'][:].astype(str)
self.genes = pd.DataFrame({
'gene': tmp,
'idx': [ i for i in range(len(tmp)) ]
})
@staticmethod
def _get_range(n, chunksize=500):
tmp = list(np.arange(0, n, chunksize))
if tmp[-1] != n:
tmp = list(tmp) + [ n ]
return tmp[:-1].copy(), tmp[1:].copy()
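# Worked example: _get_range(1050, chunksize=500) returns starts = [0, 500, 1000] and
# ends = [500, 1000, 1050], i.e. the chunk boundaries used below to stream samples out
# of the HDF5 file in pieces.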
def mul_weights(self, df_weight, samples, max_n=None, chunksize=1000):
df_sample_sub = pd.merge(
self.samples,
pd.DataFrame({'eid': samples}),
on='eid'
)
if max_n is not None and max_n < df_sample_sub.shape[0]:
df_sample_sub = df_sample_sub.iloc[:max_n, :].reset_index(drop=True)
df_weight_sub = pd.merge(
self.genes[['gene']],
df_weight,
on='gene', how='left'
)
df_weight_sub.fillna(0, inplace=True)
weight_mat = df_weight_sub.drop(columns=['gene']).values
header = list(df_weight_sub.drop(columns=['gene']).columns)
sample_idx = df_sample_sub.idx.values
starts, ends = self._get_range(sample_idx.shape[0], chunksize=chunksize)
o = []
f = h5py.File(self.fn, 'r')
for s, e in tqdm(zip(starts, ends), total=len(starts)):
mat = f['pred_expr'][:, sample_idx[s:e]]
mat = mat.T @ weight_mat
o.append(mat)
f.close()
o = np.concatenate(o, axis=0)
o =
| pd.DataFrame(o, columns=header) | pandas.DataFrame |
import pandas as pd
import sys
from suds.client import Client as sudsclient
import ssl
import os
if "PYFLASKI" in os.environ:
from pyflaski.routines import fuzzy_search
else:
from flaski.routines import fuzzy_search
david_categories = [
'GOTERM_BP_FAT', 'GOTERM_CC_FAT', 'GOTERM_MF_FAT', 'KEGG_PATHWAY',
'BIOCARTA', 'PFAM', 'PROSITE' ]
david_fields = [
'categoryName', 'termName', 'listHits', 'percent',
'ease', 'geneIds', 'listTotals', 'popHits', 'popTotals',
'foldEnrichment', 'bonferroni', 'benjamini', 'afdr']
# TODO: include 'fisher'; rename 'termName' to 'term' and 'term_name'
DEBUG_GENES="ENSMUSG00000092622,ENSMUSG00000004415,ENSMUSG00000017144,ENSMUSG00000028972,ENSMUSG00000031026,ENSMUSG00000006360,ENSMUSG00000039106,ENSMUSG00000038932,\
ENSMUSG00000040629,ENSMUSG00000044254,ENSMUSG00000060675,ENSMUSG00000037465,ENSMUSG00000033998,ENSMUSG00000030785,ENSMUSG00000042808,ENSMUSG00000034612,ENSMUSG00000032883,\
ENSMUSG00000037820,ENSMUSG00000052955,ENSMUSG00000005892,ENSMUSG00000086228,ENSMUSG00000035504,ENSMUSG00000074063,ENSMUSG00000085682,ENSMUSG00000048376,ENSMUSG00000018865,\
ENSMUSG00000025104,ENSMUSG00000022763,ENSMUSG00000030800,ENSMUSG00000021226,ENSMUSG00000038188,ENSMUSG00000038507,ENSMUSG00000014776,ENSMUSG00000029151,ENSMUSG00000030549,\
ENSMUSG00000063430,ENSMUSG00000021194,ENSMUSG00000028836,ENSMUSG00000003849,ENSMUSG00000017493,ENSMUSG00000001506,ENSMUSG00000059991,ENSMUSG00000058454,ENSMUSG00000024962,\
ENSMUSG00000020042,ENSMUSG00000037035,ENSMUSG00000058301,ENSMUSG00000058741,ENSMUSG00000039814,ENSMUSG00000026807,ENSMUSG00000046607,ENSMUSG00000004341,ENSMUSG00000038291,\
ENSMUSG00000070000,ENSMUSG00000029718,ENSMUSG00000026114,ENSMUSG00000032946,ENSMUSG00000022505,ENSMUSG00000034450,ENSMUSG00000067261,ENSMUSG00000022432,ENSMUSG00000022048,\
ENSMUSG00000032494,ENSMUSG00000026418,ENSMUSG00000051455,ENSMUSG00000018411,ENSMUSG00000009596,ENSMUSG00000022469,ENSMUSG00000087283,ENSMUSG00000073779,ENSMUSG00000031379,\
ENSMUSG00000034573,ENSMUSG00000008090,ENSMUSG00000046500,ENSMUSG00000013418,ENSMUSG00000028760,ENSMUSG00000003848,ENSMUSG00000040428,ENSMUSG00000004891,ENSMUSG00000030350,\
ENSMUSG00000003037,ENSMUSG00000055553,ENSMUSG00000034112,ENSMUSG00000025196,ENSMUSG00000034324,ENSMUSG00000026775,ENSMUSG00000056537,ENSMUSG00000029168,ENSMUSG00000031410,\
ENSMUSG00000034880,ENSMUSG00000034731,ENSMUSG00000031584,ENSMUSG00000084807,ENSMUSG00000031861,ENSMUSG00000022265,ENSMUSG00000031438,ENSMUSG00000033658,ENSMUSG00000059456,\
ENSMUSG00000042249,ENSMUSG00000024331,ENSMUSG00000034807,ENSMUSG00000030747,ENSMUSG00000031660,ENSMUSG00000023800,ENSMUSG00000070880,ENSMUSG00000023045,ENSMUSG00000052724,\
ENSMUSG00000061815,ENSMUSG00000032068,ENSMUSG00000030310,ENSMUSG00000013766,ENSMUSG00000063903,ENSMUSG00000023951,ENSMUSG00000030137,ENSMUSG00000015994,ENSMUSG00000040624,\
ENSMUSG00000048644,ENSMUSG00000038840,ENSMUSG00000032015,ENSMUSG00000028949,ENSMUSG00000037971,ENSMUSG00000048371,ENSMUSG00000047264,ENSMUSG00000015243,ENSMUSG00000039865,\
ENSMUSG00000031683,ENSMUSG00000032643,ENSMUSG00000074593,ENSMUSG00000032540,ENSMUSG00000040280,ENSMUSG00000024036,ENSMUSG00000074365,ENSMUSG00000021266,ENSMUSG00000104968,\
ENSMUSG00000006205,ENSMUSG00000043419,ENSMUSG00000032020,ENSMUSG00000039395,ENSMUSG00000062939,ENSMUSG00000031985,ENSMUSG00000034486,ENSMUSG00000034863,ENSMUSG00000047502,\
ENSMUSG00000050737,ENSMUSG00000024012,ENSMUSG00000008892,ENSMUSG00000015652,ENSMUSG00000022178,ENSMUSG00000048373,ENSMUSG00000022292,ENSMUSG00000019312,ENSMUSG00000039831,\
ENSMUSG00000026458,ENSMUSG00000020122,ENSMUSG00000031924,ENSMUSG00000004565,ENSMUSG00000037669,ENSMUSG00000005267,ENSMUSG00000002949,ENSMUSG00000048988,ENSMUSG00000053856,\
ENSMUSG00000090363,ENSMUSG00000009670,ENSMUSG00000056515,ENSMUSG00000036442,ENSMUSG00000031751,ENSMUSG00000030263,ENSMUSG00000022040,ENSMUSG00000031749,ENSMUSG00000038742,\
ENSMUSG00000070780,ENSMUSG00000070708,ENSMUSG00000003808,ENSMUSG00000037997,ENSMUSG00000026773,ENSMUSG00000022099,ENSMUSG00000081593,ENSMUSG00000045467,ENSMUSG00000031509,\
ENSMUSG00000031672,ENSMUSG00000030413,ENSMUSG00000042757,ENSMUSG00000031508,ENSMUSG00000022180,ENSMUSG00000037355,ENSMUSG00000035561,ENSMUSG00000106647,ENSMUSG00000063049,\
ENSMUSG00000028785,ENSMUSG00000031453,ENSMUSG00000111147,ENSMUSG00000003283,ENSMUSG00000063488,ENSMUSG00000046774,ENSMUSG00000036054,ENSMUSG00000024042,ENSMUSG00000039157,\
ENSMUSG00000038060,ENSMUSG00000030283,ENSMUSG00000038521,ENSMUSG00000038393,ENSMUSG00000030772,ENSMUSG00000030428,ENSMUSG00000041180,ENSMUSG00000031729,ENSMUSG00000054850,\
ENSMUSG00000025931,ENSMUSG00000039384,ENSMUSG00000022479,ENSMUSG00000029287,ENSMUSG00000025743,ENSMUSG00000042386,ENSMUSG00000096210,ENSMUSG00000050288,ENSMUSG00000019261,\
ENSMUSG00000040537,ENSMUSG00000026185,ENSMUSG00000029761,ENSMUSG00000027071,ENSMUSG00000005705,ENSMUSG00000008450,ENSMUSG00000018604,ENSMUSG00000060038,ENSMUSG00000006585,\
ENSMUSG00000086236,ENSMUSG00000054408,ENSMUSG00000029122,ENSMUSG00000025742,ENSMUSG00000004319,ENSMUSG00000052675,ENSMUSG00000031948,ENSMUSG00000081044,ENSMUSG00000039830,\
ENSMUSG00000030411,ENSMUSG00000045010,ENSMUSG00000039616,ENSMUSG00000011837,ENSMUSG00000022211,ENSMUSG00000001472,ENSMUSG00000000738,ENSMUSG00000042659,ENSMUSG00000071076,\
ENSMUSG00000031838,ENSMUSG00000020256,ENSMUSG00000028017,ENSMUSG00000063659,ENSMUSG00000046718,ENSMUSG00000032715,ENSMUSG00000023495,ENSMUSG00000099370,ENSMUSG00000031486,\
ENSMUSG00000038292,ENSMUSG00000031760,ENSMUSG00000007950,ENSMUSG00000039617,ENSMUSG00000057672,ENSMUSG00000031622,ENSMUSG00000025432,ENSMUSG00000055835,ENSMUSG00000031665,\
ENSMUSG00000008206,ENSMUSG00000063018,ENSMUSG00000091568,ENSMUSG00000033931,ENSMUSG00000021701,ENSMUSG00000022016,ENSMUSG00000023995,ENSMUSG00000030630,ENSMUSG00000032796,\
ENSMUSG00000029603,ENSMUSG00000048126,ENSMUSG00000053604,ENSMUSG00000097757,ENSMUSG00000087084,ENSMUSG00000018796,ENSMUSG00000037103,ENSMUSG00000017652,ENSMUSG00000020184,\
ENSMUSG00000050914,ENSMUSG00000031765,ENSMUSG00000068758,ENSMUSG00000061126,ENSMUSG00000004952,ENSMUSG00000031731,ENSMUSG00000022754,ENSMUSG00000030523,ENSMUSG00000002668"
def debug_david(user,DEBUG_GENES=DEBUG_GENES, ids=None):
ssl._create_default_https_context = ssl._create_unverified_context
url = 'https://david.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'
client = sudsclient(url)
client.wsdl.services[0].setlocation('https://david.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/')
client_auth = client.service.authenticate(user)
if not ids:
ids=DEBUG_GENES
database="ENSEMBL_GENE_ID"
name="target"
categories="GOTERM_BP_FAT,GOTERM_CC_FAT,GOTERM_MF_FAT,PFAM,KEGG_PATHWAY,OMIM_DISEASE"
p=0.1
n=2
size = client.service.addList(ids, database, name, 0) # args: inputListIds, idType, listName, listType
client_categories = client.service.setCategories(categories)
client_report = client.service.getChartReport(p, n)
size_report = len(client_report)
report="Success: "+str(size)+"; "+str(size_report)
return report
def run_david(pa, path_to_ensembl_maps="/flaski/data/david"):
#database, categories, user, ids, ids_bg = None, name = '', name_bg = '', verbose = False, p = 0.1, n = 2):
# Modified from https://david.ncifcrf.gov/content.jsp?file=WS.html
# by courtesy of HuangYi @ 20110424
"""Queries the DAVID database for an enrichment analysis
Check https://david.ncifcrf.gov/content.jsp?file=DAVID_API.html for database == "type" tag and categories == "annot" tag.
Args:
pa (dict): A dictionary of the style { "argument":"value"} as outputted by `figure_defaults`.
Returns:
None if no ids match the queried database, or a Pandas DataFrame with results.
"""
database=pa["database_value"]
categories_=[ s for s in list( pa.keys() ) ]
categories_=[ s for s in categories_ if "categories_" in s ]
categories_=[ s for s in categories_ if "_value" in s ]
categories=[]
for k in categories_:
categories=categories+pa[k]
categories=",".join(categories)
user=pa["user"]
ids=pa["ids"].split("\n")
ids=[ s.rstrip("\r").strip(" ") for s in ids if s != " "]
ids=[ s for s in ids if s != " "]
ids=[ s for s in ids if len(s) > 0 ]
ids=[ s.split("\t") for s in ids ]
idsdf=pd.DataFrame(ids)
idsdf[0]=idsdf[0].apply( lambda x: str(x).split(";")[0] )
names_dbs=["name_hsa_ensembl", "name_mus_ensembl", "name_cel_ensembl","name_dros_ensembl" ]
if database in names_dbs:
file_dic={"name_hsa_ensembl":"Homo_sapiens.GRCh38.92.tsv", "name_mus_ensembl":"Mus_musculus.GRCm38.92.tsv", "name_cel_ensembl":"Caenorhabditis_elegans.WBcel235.92.tsv","name_dros_ensembl":"Drosophila_melanogaster.BDGP6.28.92.tsv"}
id_name=pd.read_csv(path_to_ensembl_maps+"/"+file_dic[database],sep="\t")
db_names=id_name["gene_name"].tolist()
query_names=idsdf[0].tolist()
query_names=",".join(query_names)
found_values, emsg=fuzzy_search(query_names,db_names)
if emsg:
return None, None, emsg
newcol=idsdf.columns.tolist()[-1]+1
id_name["gene_name"]=id_name["gene_name"].apply(lambda x: str(x).lower() )
id_name.index=id_name["gene_name"].tolist()
id_name=id_name.to_dict()["gene_id"]
idsdf[newcol]=idsdf[0]
idsdf[0]=idsdf[0].apply(lambda x: id_name[ str(x).lower() ])
# insert mapping of ensembl gene name to gene id here
annotations=idsdf.columns.tolist()
ids=idsdf[0].tolist()
ids_map={}
if len(annotations) > 1:
idsdf[0]=idsdf[0].apply(lambda x: x.upper() )
idsdf.index=idsdf[0].tolist()
idsdf=idsdf.drop([0],axis=1)
ids_map=idsdf.to_dict()
if " ".join( pa["ids_bg"].split(" ")[:12] ) != "Leave empty if you want to use all annotated genes for your":
ids_bg=pa["ids_bg"].split("\n")
ids_bg=[ s.rstrip("\r").strip(" ") for s in ids_bg ]
ids_bg=[ s for s in ids_bg if s != " "]
ids_bg=[ s for s in ids_bg if len(s) > 0 ]
if len(ids_bg) == 0:
ids_bg = None
else:
if database in names_dbs:
file_dic={"name_hsa_ensembl":"Homo_sapiens.GRCh38.92.tsv", "name_mus_ensembl":"Mus_musculus.GRCm38.92.tsv", "name_cel_ensembl":"Caenorhabditis_elegans.WBcel235.92.tsv","name_dros_ensembl":"Drosophila_melanogaster.BDGP6.92.tsv"}
id_name=pd.read_csv(path_to_ensembl_maps+"/"+file_dic[database],sep="\t")
id_name_=id_name.copy()
db_names=id_name["gene_name"].tolist()
query_names=",".join(ids_bg)
found_values, emsg=fuzzy_search(query_names,db_names)
if emsg:
return None, None, emsg
id_name["gene_name"]=id_name["gene_name"].apply(lambda x: str(x).lower() )
id_name.index=id_name["gene_name"].tolist()
id_name=id_name.to_dict()["gene_id"]
ids_bg=[ id_name[ str(x).lower() ] for x in ids_bg ]
id_name_=id_name_[ id_name_["gene_id"].isin(ids_bg) ]
id_name_["gene_id"]=id_name_["gene_id"].apply(lambda x: str(x).upper() )
id_name_.index=id_name_["gene_id"].tolist()
id_name_=id_name_.to_dict()["gene_name"]
else:
id_name_=None
# bg_gene_names= keep on here
else:
ids_bg=None
name=pa["name"]
if ids_bg is not None:
name_bg=pa["name_bg"]
else:
name_bg=""
p=pa["p"]
n=pa["n"]
#, categories, user, ids, ids_bg = None, name = '', name_bg = '', verbose = False, p = 0.1, n = 2
verbose=False
ids = ','.join([str(i) for i in ids])
use_bg = 0
if database in names_dbs:
database="ENSEMBL_GENE_ID"
# print("Testing")
# test=debug_david(pa["user"],ids=ids)
# print(test)
if ids_bg:
ids_bg = ','.join([str(i) for i in ids_bg])
ssl._create_default_https_context = ssl._create_unverified_context
url = 'https://david.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'
try:
client = sudsclient(url)
except:
return None, None, "Could not connect to DAVID. Server might be down."
client.wsdl.services[0].setlocation('https://david.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/')
try:
client_auth = client.service.authenticate(user)
except:
return None, None, "Could not connect to DAVID. Server might be down."
if str(client_auth) == "Failed. For user registration, go to http://david.abcc.ncifcrf.gov/webservice/register.htm" :
return None, None, str(client_auth)
if verbose:
print('User Authentication:', client_auth)
sys.stdout.flush()
# if ids_bg :
# size = client.service.addList(ids_bg, database, name, 0)
# if float(size) > float(0):
# client_report=client.service.getListReport()
# bg_mapped=[]
# for r in client_report:
# d = dict(r)
# bg_mapped.append(d["values"][0])
# bg_not_mapped=[ s for s in ids_bg.split(",") if s not in bg_mapped ]
size = client.service.addList(ids, database, name, 0) # args: inputListIds, idType, listName, listType
report_stats=[['Mapping rate of ids: ', str(size)]]
if verbose:
print('Mapping rate of ids: ', str(size))
sys.stdout.flush()
if float(size) <= float(0):
msg='Mapping rate of ids: %s.' %str(size)
return None, None, msg
# client_report=client.service.getListReport()
# mapped=[]
# for r in client_report:
# d = dict(r)
# mapped.append(d["values"][0])
# not_mapped=[ s for s in ids.split(",") if s not in mapped ]
#print("Finished retrieving list report.")
#sys.stdout.flush()
if ids_bg:
#print("User given BG.")
#sys.stdout.flush()
size_bg = client.service.addList(ids_bg, database, name_bg, 1)
report_stats.append(['Mapping rate of background ids: ', str(size_bg)])
if verbose:
print('Mapping rate of background ids: ', str(size_bg))
sys.stdout.flush()
if float(size_bg) <= float(0):
msg='Mapping rate of background ids: %s' %str(size_bg)
return None, None, msg
client_categories = client.service.setCategories(categories)
report_stats.append(['Categories used: ', client_categories])
if verbose:
print('Categories used: ', client_categories)
sys.stdout.flush()
client_report = client.service.getChartReport(p, n)
size_report = len(client_report)
report_stats.append(['Records reported: ', str(size_report)])
if verbose:
print('Records reported: ', str(size_report))
sys.stdout.flush()
def get_map(x,ids_map):
genes=x.split(", ")
genes=[ str(ids_map[gene.upper()]) for gene in genes ]
genes=", ".join(genes)
return genes
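# Worked example with a purely hypothetical mapping: get_map("idA, idB", {"IDA": "nameA", "IDB": "nameB"})
# returns "nameA, nameB", i.e. it translates DAVID's comma-separated gene IDs back to the
# user-supplied annotation values stored in ids_map.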
if size_report > 0:
df = []
for r in client_report:
d = dict(r)
line = []
for f in david_fields:
line.append(str(d[f]).encode('ascii','ignore'))
df.append(line)
df = pd.DataFrame(df)
df.columns=david_fields
for col in david_fields:
df[col] = df[col].apply(lambda x: x.decode())
df.columns=["Category","Term","Count","%","PValue","Genes","List Total","Pop Hits","Pop Total","Fold Enrichment","Bonferroni","Benjamini","FDR"]
# insert ensembl gene name to gene id here
if len(list(ids_map.keys())) > 0:
for annotation in list(ids_map.keys()):
genes_to_annotation=ids_map[annotation]
df["annotation_%s" %str(annotation)]=df["Genes"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )
else:
df=
| pd.DataFrame(columns=["Category","Term","Count","%","PValue","Genes","List Total","Pop Hits","Pop Total","Fold Enrichment","Bonferroni","Benjamini","FDR"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import subprocess
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
import json
pd.set_option('display.max_rows', 500)
# 
# # Data Understanding
# * RKI, web scraping: https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
# * <NAME> (GitHub): https://github.com/CSSEGISandData/COVID-19.git
# * REST API services to retrieve data: https://npgeo-corona-npgeo-de.hub.arcgis.com/
# ## GITHUB csv data
#
# git clone/pull https://github.com/CSSEGISandData/COVID-19.git
# In[2]:
git_pull = subprocess.Popen( "/usr/bin/git pull" ,
cwd = os.path.dirname( '../data/raw/COVID-19/' ),
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE )
(out, error) = git_pull.communicate()
print("Error : " + str(error))
print("out : " + str(out))
# In[3]:
data_path='../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw=
| pd.read_csv(data_path) | pandas.read_csv |
"""
Functions for processing/analysing ARNA campaign observations
"""
import os
import sys
import gc
import glob
import xarray as xr
import numpy as np
import AC_tools as AC
import pandas as pd
from netCDF4 import Dataset
from datetime import datetime as datetime_
import datetime as datetime
import time
from time import gmtime, strftime
# Import from elsewhere in ARNA module
from . core import *
from . utils import *
def get_coordinates_from_NetCDF_file(ds=None, folder=None, filename=None,
falt_var='PS_RVSM',
flat_var='LAT_GIN', flon_var='LON_GIN',
AltVar='hPa', LonVar='lon', LatVar='lat',
ftime_var='Time', TimeVar='time',
convert_m2hPa=False, drop_NaNs=True):
"""
Get locations (lat, lon, alt) from NetCDF files
"""
import pandas as pd
# Make a dataframe of locations from the NetCDF file
if isinstance(ds, type(None)):
ds = xr.open_dataset(folder+filename)
df = pd.DataFrame()
df[AltVar] = ds[falt_var].values
df[LonVar] = ds[flon_var].values
df[LatVar] = ds[flat_var].values
df.index = ds[ftime_var].values
# Convert metres of height to hPa
# NOTE: The below conversion is not advised.
# Use the external pressure variable instead (PS_RVSM).
if convert_m2hPa:
df.loc[:, AltVar] = AC.hPa_to_Km(df[AltVar].values/1E3, reverse=True, )
# Drop where there are not values for all coordinates
if drop_NaNs:
df = df.dropna()
return df
def get_ARNA_flights_as_dfs():
"""
Retrieve the ARNA flights as a list of dataframes
"""
flight_nums = [216, 217, 218, 219, 220, 221, 222, 223, 224, 225]
flight_IDs = ['C{}'.format(i) for i in flight_nums]
dfs = {}
for flight_ID in flight_IDs:
print(flight_ID)
try:
df = AC.get_FAAM_locations_as_df(flight_ID=flight_ID)
dfs[flight_ID] = df
except:
print('WARNING: failed for {}'.format(flight_ID))
return dfs
def set_flagged_data2NaNs(ds, VarName='no_mr', flag2use=0,
FlagName='no_flag'):
"""
Set the flagged data in a dataset to be NaNs
"""
# What do the flags mean? (copied from FAAM NetCDF metadata)
# ( file: 'core-nitrates_faam_20200211_v002_r1_c224.nc')
# Flag=0 indicates good data.
# Flag=1 indicates reduced quality data.
# Flag=2 indicates suspect data.
# Flag=3 indicates missing or invalid data.
# Create a boolean
bool = ds[FlagName].values != flag2use
# Make values without the flagged value into NaNs
ds[VarName].loc[bool] = np.NaN
return ds
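# e.g. set_flagged_data2NaNs(ds, VarName='no_mr', FlagName='no_flag') keeps only the
# samples whose flag equals flag2use (0, i.e. good data) and masks every other sample of
# VarName as NaN.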
def get_SWAS_data4flight(flight_ID=None):
"""
Retrieve SWAS data for ARNA flights
"""
# Where is the data?
folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'SWAS')
# filename = 'ARNA-FIRSTLOOK-SWAS_JDL_typo_fix.csv'
# var2use = 'comments4'
# format = '%d/%m/%Y %H:%M:%S'
# Or use latest file (NOTE: issue with column formating)
# filename = 'ARNA-SECONDLOOK-SWAS.csv'
# Use the updated second look file
filename = 'ARNA-SECONDLOOK-SWAS_v2.csv'
df = pd.read_csv(folder+filename)
print(filename)
# Update the index to use the SWAS sample ('fire') start time
var2use = 'SAMPLE START TIME'
format = '%d/%m/%Y %H:%M:%S'
df.index = pd.to_datetime(df[var2use].values, format=format)
# If a flight ID stated, then only return points for that flight
if isinstance(flight_ID, type(None)):
pass
else:
# Get the beginning and end of the flight
dfS = get_summary4flight(flight_ID=flight_ID)
sdate = dfS.index.min()
edate = dfS.index.max()
# Only consider samples within this time
df = df.loc[df.index >= sdate, :]
df = df.loc[df.index <= edate, :]
return df
def map_SWAS_var2GEOS_var(var, invert=False):
"""
Map variables names from SWAS to GEOS variable names
"""
d = {
# '1_3_butadiene':,
# '1_butene':,
# '2_3_methylpentane':,
# '224_tmp':,
'acetaldehyde': 'ALD2',
'acetone': 'ACET',
# 'acetylene':,
'benzene': 'BENZ', # GEOSChem, but not GEOS-CF output
# 'benzenechb':,
'cis_2_butene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'cyclo_pentane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'dms': 'DMS', # GEOSChem, but not GEOS-CF output
# 'dmschb':,
'ethane': 'C2H6',
# 'ethene':,
# 'ethylbenzene':,
# 'extra_1':,
# 'extra_2':,
# 'extra_3':,
# 'extra_4':,
# 'extra_5':,
# 'extra_l2':,
'iso_butane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'iso_butene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'iso_pentane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'isoprene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'methanol': 'MOH',
'mp_xylene': 'XYLE',
'n_butane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'n_heptane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'n_hexane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'n_octane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'n_pentane': 'ALK4', # NOTE: lumped tracer for >= C4 Alkanes
'o_xylene': 'XYLE',
'pent_1_ene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'propane': 'C3H8',
'propene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'toluene': 'TOLU',
# 'toluenechb':,
'trans_2_butene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
'trans_2_pentene': 'PRPE', # NOTE: lumped tracer for >= C3 alkenes
}
# Invert the dictionary?
if invert:
d = {v: k for k, v in list(d.items())}
return d[var]
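# Worked examples: map_SWAS_var2GEOS_var('acetone') returns 'ACET'. With invert=True the
# dictionary is flipped, so lumped tracers such as 'ALK4' or 'PRPE' map back to only one
# SWAS species (the last one listed above), since duplicate values collapse on inversion.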
def get_ARNA_flight_log_as_df():
"""
Make a single pd.DataFrame with all flight summaries
"""
flight_nums = [
# 216,
217, 218, 219, 220, 221, 222, 223, 224, 225
]
flight_IDs = ['C{}'.format(i) for i in flight_nums]
dfs = []
for flight_ID in flight_IDs:
dfs += [get_summary4flight(flight_ID=flight_ID)]
# Combine and return as a single dataframe sorted by time
df = pd.concat(dfs)
df = df.sort_index()
return df
def get_summary4flight(flight_ID='C217'):
"""
retrieve a FAAM flight summary as a dataframe
"""
folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'CEDA/v2020_06')
filename = 'flight-sum_faam_*_*_{}.csv'.format(flight_ID.lower())
file2use = glob.glob(folder+filename)
ass_str = 'WARNING: {} flight summaries found for flight {}!'
assert len(file2use) == 1, ass_str.format(file2use, flight_ID)
# Add Gotcha for missing header in FAAM file archived at CEDA
columns = [
'Event', 'Start', 'Start Hdg / °', 'Start Hgt / kft', 'Start Lat / °',
'Start Long / °', 'Stop', 'Stop Hdg / °', 'Stop Hgt / kft',
'Stop Lat / °', ' Stop Long / °', 'Comment',
]
if flight_ID == 'C217':
header = None
names = columns
else:
header = 0
names = None
# Read file
df = pd.read_csv(file2use[0], header=header, names=names)
# Add a flight ID column
df['flight_ID'] = flight_ID
# Update start column to be in datatime format
var2use = 'Start'
format = '%Y-%m-%d %H:%M:%S'
df.index = pd.to_datetime(df[var2use].values, format=format)
return df
def get_filters_data4flight(flight_ID='C217', all_flights=True):
"""
Retrieve filters data from ARNA flights
"""
# Where is the data?
folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'Filters')
# What is the name of the sheet in the excel file?
# filename = 'Overview_all_filters_ACSIS_5_and_ARNA-1.xlsx'
# filename = 'Overview_filters_ARNA_2.xlsx'
filename = 'Overview_filters_ARNA_2_TMS_edits.xlsx'
sheet_name = 'Sheet1'
dfFULL = pd.read_excel(folder + filename, sheet_name=sheet_name)
# Now Just look at core data of interest
CoreCols = [
'Day', 'Flight', 'Filter', 'height', 'Time on', 'Time off',
'Airflow (stL)',
]
# - Select nitrate data
# Yes, GEOS-CF has nitrate variables output - 'NIT', 'NITs'
NO3cols = [i for i in dfFULL.columns if 'NO3' in i]
dfN = dfFULL[CoreCols + NO3cols]
#
# NO3_var = 'NO3.total'
NO3_var2use = ['NO3.2', 'NO3.5']
units = 'nanomoles/m3'
# NO3_idx = [list(dfFULL.columns).index(i) for i in var2inc]
# - Also save sulfate?
# Yes, GEOS-CF has sulfate variables output - 'SO4', 'SO4s'
# SO4cols = [i for i in dfFULL.columns if 'SO4' in i]
# dfS = dfFULL[CoreCols + SO4cols]
SO4_var2use = ['SO4.2', 'SO4.5']
units = 'nanomoles/m3'
# SO4_idx = [list(dfFULL.columns).index(i) for i in SO4_var2use]
# - Now chop off excess headers and make sure formats are correct
df = dfFULL.loc[dfFULL.index[2:], :]
#
# idx2use = [list(dfFULL.columns).index(i) for i in CoreCols]
# idx2use += NO3_idx + SO4_idx
# cols2use = [list(dfFULL.columns)[i] for i in idx2use ]
df = df[CoreCols + NO3_var2use + SO4_var2use]
# Replace 'lower than blank' and 'NaN' strings with np.NaN
df = df.replace('lower than blank', np.NaN)
df = df.replace('NaN', np.NaN)
# Remove blanks (as these are NaNs)
df = df.rename_axis(None)
df = df.loc[(df['height'] != 'blank').values, :]
# Update sampling times to date times
# Start time
TimeOnVar = 'Time on'
sdate_var = 'Sample Start'
df[sdate_var] = df['Day'].astype(str) + ' ' + df[TimeOnVar].astype(str)
format = '%Y-%m-%d %H:%M:%S'
df[sdate_var] = pd.to_datetime(df[sdate_var].values, format=format)
del df[TimeOnVar]
# End time
TimeOffVar = 'Time off'
edate_var = 'Sample End'
df[edate_var] = df['Day'].astype(str) + ' ' + df[TimeOffVar].astype(str)
format = '%Y-%m-%d %H:%M:%S'
df[edate_var] = pd.to_datetime(df[edate_var].values, format=format)
del df[TimeOffVar]
# calculate mid point of sampling and set this as index
interval_var = 'Sample interval'
df[interval_var] = df[edate_var] - df[sdate_var]
# Just use the middle of the timestep
df.index = df[sdate_var] + (df[interval_var]/2)
df = df.rename_axis(None)
# - Just consider totals for species of interest
NO3_var = 'NO3.total'
df[NO3_var] = df[NO3_var2use].sum(axis=1)
SO4_var = 'SO4.total'
df[SO4_var] = df[SO4_var2use].sum(axis=1)
del dfFULL
# Convert to ug/m3 from 'nanomoles/m3'
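# (nmol/m3 * 1e-9 mol/nmol * M g/mol * 1e6 ug/g = ug/m3, where M is assumed to be the
# molar mass in g/mol returned by AC.species_mass)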
df[NO3_var] = df[NO3_var].values / 1E9 * AC.species_mass('NIT') * 1E6
df[SO4_var] = df[SO4_var].values / 1E9 * AC.species_mass('SO4') * 1E6
# Return all flights unless a specific flight requested
if all_flights:
return df
else:
return df.loc[df['Flight'] == flight_ID, :]
def get_CIMS_data4flight(flight_ID='C225', resample_data=True, debug=False):
"""
Retrieve ToF-CIMS data from ARNA flights
"""
# Where is the data?
folder = '{}/{}/'.format(get_local_folder('ARNA_data'), 'CIMS')
# What is the name of the sheet in the excel file?
sheet_name = flight_ID
# - Setup a shared (1Hz) time axis to merge onto
# Use time index for timings of flight, then add a 2 hour buffer
dfS = get_summary4flight(flight_ID=flight_ID)
sdate = dfS.index.values.min()
edate = dfS.index.values.max()
sdate, edate = AC.dt64_2_dt([sdate, edate])
sdate = AC.add_hrs(sdate, -2)
edate = AC.add_hrs(edate, 2)
index = pd.date_range(start=sdate, end=edate, freq='1S')
dfM = pd.DataFrame(index=index)
# - Retrieve the core CIMS observations (use the time coordinate for index)
try:
filename = 'ACSIS6_0.1hz_Bromine.xlsx'
format = '%m/%d/%Y %H:%M:%S'
df = pd.read_excel(folder + filename, sheet_name=sheet_name,
date_parser=format)
xl = pd.ExcelFile(folder + filename)
if debug:
print(xl.sheet_names)
dt_var = 'Date:Time'
dates = AC.dt64_2_dt(df[dt_var].values)
dates = [i.strftime(format) for i in dates]
dates = [datetime_.strptime(i, '%d/%m/%Y %H:%M:%S') for i in dates]
df.index = dates
del df[dt_var]
# Merge the files
dfM = pd.concat([dfM, df], axis="index")
except:
pstr = "WARNING: failed to include CIMS halogen data for '{}' in df"
print(pstr.format(flight_ID))
# - Also get HNO3 / HONO (use the time coordinate for index)
try:
filename = 'ACSIS6_ARNA_1hz_HNO3_HONO_ToSend.xlsx'
df2 = pd.read_excel(folder + filename, sheet_name=sheet_name)
xl = pd.ExcelFile(folder + filename)
if debug:
print(xl.sheet_names)
dt_var = 'date'
# df2.index = pd.to_datetime( df2[dt_var].values, format='%m/%d/%Y %H:%M:%S')
dates = AC.dt64_2_dt(df2[dt_var].values)
dates = [i.strftime(format) for i in dates]
dates = [datetime_.strptime(i, '%d/%m/%Y %H:%M:%S') for i in dates]
df2.index = dates
del df2[dt_var]
# Merge the files
dfM =
| pd.concat([dfM, df2], axis="index") | pandas.concat |
# Import necessary packages
import pymeasure
from pymeasure.instruments.keithley import Keithley2400
import numpy as np
import pandas as pd
from time import sleep
from time import time
from matplotlib import pyplot as plt
import datetime
import os
print('JVfunctions Loaded Without Error')
##Basic JV scan function
#############################################################################
#define function with inputs
def JVscan(cell_name, cell_area, v_in, v_fin, averages, data_points, plc, bufdelay, prebias, biasV, biastime, pulse, pulsedelay):
direc = 'C:/Users/IECs Finest/Desktop/Jupyter/JV/'
os.chdir(direc)
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
if os.path.isdir(foldtime) is False:
os.mkdir(foldtime)
os.chdir(foldtime)
if os.path.isdir(cell_name) is False:
os.mkdir(cell_name)
os.chdir(cell_name)
# Connect and configure the instrument
sourcemeter = Keithley2400('GPIB0::24::INSTR')
sourcemeter.reset() #reset meter
sourcemeter.use_front_terminals() #sets to use front terminals
sourcemeter.compliance_voltage = 10 # Sets the compliance voltage to 10 V
sourcemeter.apply_voltage() #set meter to apply voltage
sourcemeter.measure_current(nplc=plc, current=1e-2, auto_range=True) #set meter to measure current
#sourcemeter.config_voltage_source()
sleep(bufdelay) # wait here to give the instrument time to react
sourcemeter.config_buffer(averages,bufdelay) #configure the buffer
# Allocate arrays to store the measurement results
voltagedown = np.linspace(v_fin, v_in, num=data_points)
currentdown = []
currentdown_stds = []
powerdown=[]
voltageup = np.linspace(v_in, v_fin, num=data_points)
currentup = []
currentup_stds = []
powerup=[]
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
print('scan1')
sourcemeter.enable_source() # enable the source
#Prebias the cell for steady state or bias pretreatment
if prebias== True:
print('Prebiasing at ')
print(str(biasV)+' volts')
sourcemeter.source_voltage= biasV #prebias at "final voltage"
sourcemeter.measure_current(nplc=plc, current=1e-2, auto_range=True) #set meter to measure current
sleep(biastime)
#Begin the actual scan
# Loop through each voltage point, measure and record the current density
print('Scan Down')
for i in range(data_points):
sourcemeter.source_voltage= voltagedown[i]
sourcemeter.measure_current(nplc=plc, current=1e-2,auto_range=True)
sourcemeter.reset_buffer()
#sleep(0.1)
sourcemeter.start_buffer()
sleep(bufdelay)
#sourcemeter.wait_for_buffer(timeout=1, interval=0.05)
# Record the average and standard deviation of the current density
currentdown.append(sourcemeter.means[1]/cell_area)
currentdown_stds.append(sourcemeter.standard_devs[1]/cell_area)
#Calculate the power (which in this case is equal to efficiency)
powerdown.append(voltagedown[i]*sourcemeter.means[1]/cell_area*-1000)
if pulse==True:
sourcemeter.source_voltage = biasV
sourcemeter.measure_current(nplc=plc, current=1e-2,auto_range=True)
sleep(pulsedelay)
# Loop through each voltage point, measure and record the current density
print('Scan Up')
for i in range(data_points):
sourcemeter.source_voltage = voltageup[i]
sourcemeter.reset_buffer()
#sleep(0.1)
sourcemeter.start_buffer()
sleep(bufdelay)
#sourcemeter.wait_for_buffer(timeout=1, interval=0.05)
# Record the average and standard deviation of the current density
currentup.append(sourcemeter.means[1]/cell_area)
currentup_stds.append(sourcemeter.standard_devs[1]/cell_area)
#Calculate the power (which in this case is equal to efficiency)
powerup.append(voltageup[i]*sourcemeter.means[1]*-1000/cell_area)
if pulse==True:
sourcemeter.source_voltage = biasV
sourcemeter.measure_current(nplc=plc, current=1e-2,auto_range=True)
sleep(pulsedelay)
data1 = pd.DataFrame({
'Currentup (A)': currentup,
'Voltageup (V)': voltageup,
'Powerup (mW/cm2)': powerup,
'Currentup Std (V)': currentup_stds,
'Currentdown (A)': currentdown,
'Voltagedown (V)': voltagedown,
'Power down (mW/cm2)': powerdown,
'Currentdown Std (V)': currentdown_stds,
})
filename=cell_name+currtime+str(data_points)+'_pts'+str(averages)+'_avg'+str(plc)+'_plcs'+'_JV.csv'
data1.to_csv(filename)
plt.plot(data1['Voltageup (V)'],data1['Currentup (A)'])
plt.plot(data1['Voltagedown (V)'],data1['Currentdown (A)'])
plt.xlim(-.6,1.2)
plt.ylim(-25/1000,25/1000)
sourcemeter.shutdown()
#JVscan(cell_name, cell_area, v_in, v_fin, averages, data_points, plc, bufdelay, prebias, biasV, biastime, pulse, pulsedelay)
################################################################################################################
#MPPT function
def MPPT(cell_name,cell_area,plc,averages, data_points, i_volt, timedelay):
#define some variables and get current date and time
bufdelay=0.1 # delay time to ensure there is enough time to communicate with SMU
#i_volt=0.001 #initial guess voltage, can be changed to start mppt scan at fwd bias or near mppt instead of near 0
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
# Connect and configure the instrument
sourcemeter = Keithley2400('GPIB0::24::INSTR') #Connect
sourcemeter.reset()
sourcemeter.use_front_terminals()
sourcemeter.compliance_voltage = 2 # Sets the compliance voltage to 2V
sourcemeter.apply_voltage()
sourcemeter.measure_current(nplc=plc, current=1e-2, auto_range=True)
sleep(bufdelay) # wait here to give the instrument time to react
sourcemeter.config_buffer(averages,bufdelay)
#set voltage to initial voltage setpoint
volt=i_volt
sourcemeter.source_voltage = volt
pval=0.1 #initial value for power
pscale=0.01 #scaling factor
scale=0.01 #scaling factor
sourcemeter.enable_source()
voltnew=volt
start_time = time() # get start time of measurement for time tracking
try: #inside a try loop so you can interrupt the kernel and still save the current measurement
while True: #This makes the mppt scan go until you press abort, you could set it to do a number of scans instead
sourcemeter.enable_source() # enable source
# Allocate arrays to store the measurement results
voltages = []
currents = []
current_stds = []
powers= []
meas_num=[]
sourcemeter.source_voltage = volt
pval=0.1
pscale=0.01
scale=0.01 #this value scales the random number the voltage is scaled by: smaller number will result in more stable mppt but slower time to reach mppt
sourcemeter.enable_source()
voltnew=volt
now = datetime.datetime.now()
currtime = now.strftime("%m%d%y%H%M")
for i in range(data_points):
direc = 'C:/Users/<NAME>/Desktop/Jupyter/MPPT/'
os.chdir(direc)
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
if os.path.isdir(foldtime) is False:
os.mkdir(foldtime)
os.chdir(foldtime)
if os.path.isdir(cell_name) is False:
os.mkdir(cell_name)
os.chdir(cell_name)
random_val=scale*np.random.rand(1).tolist()[0] #a random value to increase the voltage set point between 0 and 10mV
#increase new voltage based on random number
sourcemeter.source_voltage=voltnew #set voltage
sourcemeter.reset_buffer() #reset buffer
sleep(bufdelay) #wait
sourcemeter.start_buffer()
sleep(bufdelay)
# Record the average and standard deviation
voltages.append(voltnew) #record the voltage the cell is at
currents.append(sourcemeter.means[1]*1000/cell_area) #record the average current measurement of the number of averages
current_stds.append(sourcemeter.standard_devs[1]) #record the standard deviation
powers.append(currents[i]*voltnew*-1) #calculate and record the power
pvalnew=powers[i] #update pval to calculate the scaling factor
pscale=pvalnew-pval #the difference in power from the last to the new measurement, tells whether the voltage step made the power higher or lower
vscale=voltnew-volt #the difference in voltage from the last to the new measurement, telling whether the voltage was increased or decreased
sleep(timedelay) #allows you to slow down measurements by adding a wait time (you will get fewer points)
meas_num.append(time()-start_time) #record the time since the measurement started
volt=voltnew #update the old voltage with the new voltage since the measurement cycle is over
#control algorithm using the perturb and observe method
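# - power went up (pscale>0): keep stepping the voltage in the same direction as last time
# - power went down (pscale<0): reverse the direction of the voltage step
# - power unchanged: hold the voltage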
if pscale>0:
if vscale>0:
voltnew=volt+random_val
else:
voltnew=volt-random_val
elif pscale<0:
if vscale>0:
voltnew=volt-random_val
else:
voltnew=volt+random_val
else:
voltnew=volt
pval=pvalnew
print('Pmax = '+str(round(pval,3))+'%'+' ',end='\r') #prints the power
sourcemeter.shutdown() #shuts down source at end of measurement
tempdict = {
'Measurement_Number' : meas_num,
'Voltage' : voltages,
'Currents' : currents,
'current_stds' : current_stds,
'powers' : powers
} #put arrays of values in a temporary dictionary to save data
data=pd.DataFrame(tempdict) #put into a pandas dataframe to save data
filename= currtime+'_'+str(cell_name)+'MPPT'+'.csv' #define file name
data.to_csv(filename) #save file
sleep(1)
except:
#KeyboardInterrupt #this is so the data will save for the current scan you're doing when you interrupt the kernel
print('Interrupted ')
if len(meas_num)<len(voltages):
meas_num.append(time()-start_time)
if len(currents)<len(voltages):
currents.append(np.nan)
if len(current_stds)<len(currents):
current_stds.append(np.nan)
if len(powers)<len(current_stds):
powers.append(np.nan)
sourcemeter.shutdown()
tempdict = {
'Measurement_Number' : meas_num,
'Voltage' : voltages,
'Currents' : currents,
'current_stds' : current_stds,
'powers' : powers
}
data=pd.DataFrame(tempdict)
filename= currtime+'_'+str(cell_name)+'MPPT'+'.csv'
data.to_csv(filename)
plt.scatter(tempdict['Measurement_Number'],tempdict['powers'])
print('Measurement complete')
print('Files saved: ')
print(filename)
################################################################################################################
#MPPTJV function
def MPPTJV(plc,averages, data_points, i_volt, timedelay, cell_name, cell_area, v_in, v_fin, bufdelay, prebias, biasV, biastime, pulse, pulsedelay):
#define some variables and get current date and time
bufdelay=0.1 # delay time to ensure there is enough time to communicate with SMU
#i_volt=0.001 #initial guess voltage, can be changed to start mppt scan at fwd bias or near mppt instead of near 0
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
# Connect and configure the instrument
sourcemeter = Keithley2400('GPIB0::24::INSTR') #Connect
sourcemeter.reset()
sourcemeter.use_front_terminals()
sourcemeter.compliance_voltage = 2 # Sets the compliance voltage to 2V
sourcemeter.apply_voltage()
sourcemeter.measure_current(nplc=plc, current=1e-2, auto_range=True)
sleep(bufdelay) # wait here to give the instrument time to react
sourcemeter.config_buffer(averages,bufdelay)
#set voltage to initial voltage setpoint
volt=i_volt
sourcemeter.source_voltage = volt
pval=0.1 #initial value for power
pscale=0.01 #scaling factor
scale=0.01 #scaling factor
sourcemeter.enable_source()
voltnew=volt
start_time = time() # get start time of measurement for time tracking
try: #inside a try loop so you can interrupt the kernel and still save the current measurement
while True: #This makes the mppt scan go until you press abort, you could set it to do a number of scans instead
sourcemeter.enable_source() # enable source
# Allocate arrays to store the measurement results
voltages = []
currents = []
current_stds = []
powers= []
meas_num=[]
sourcemeter.source_voltage = volt
pval=0.1
pscale=0.01
scale=0.01 #this value scales the random number the voltage is scaled by: smaller number will result in more stable mppt but slower time to reach mppt
sourcemeter.enable_source()
voltnew=volt
now = datetime.datetime.now()
currtime = now.strftime("%m%d%y%H%M")
for i in range(data_points):
direc = 'C:/Users/IECs Finest/Desktop/Jupyter/MPPT/'
os.chdir(direc)
now = datetime.datetime.now() #Get date and time
currtime = now.strftime("%m%d%y%H%M") #get formatted time
foldtime = now.strftime("%m-%d-%y")
if os.path.isdir(foldtime) is False:
os.mkdir(foldtime)
os.chdir(foldtime)
if os.path.isdir(cell_name) is False:
os.mkdir(cell_name)
os.chdir(cell_name)
random_val=scale*np.random.rand(1).tolist()[0] #a random value to increase the voltage set point between 0 and 10mV
#increase new voltage based on random number
sourcemeter.source_voltage=voltnew #set voltage
sourcemeter.reset_buffer() #reset buffer
sleep(bufdelay) #wait
sourcemeter.start_buffer()
sleep(bufdelay)
# Record the average and standard deviation
voltages.append(voltnew) #record the voltage the cell is at
currents.append(sourcemeter.means[1]*1000/cell_area) #record the average current measurement of the number of averages
current_stds.append(sourcemeter.standard_devs[1]) #record the standard deviation
powers.append(currents[i]*voltnew*-1) #calculate and record the power
pvalnew=powers[i] #update pval to calculate the scaling factor
pscale=pvalnew-pval #the difference in power from the last to the new measurement, tells whether the voltage step made the power higher or lower
vscale=voltnew-volt #the difference in voltage from the last to the new measurement, telling whether the voltage was increased or decreased
sleep(timedelay) #allows you to slow down measurements by adding a wait time (you will get fewer points)
meas_num.append(time()-start_time) #record the time since the measurement started
volt=voltnew #update the old voltage with the new voltage since the measurement cycle is over
#control algorithm using the perturb and observe method
if pscale>0:
if vscale>0:
voltnew=volt+random_val
else:
voltnew=volt-random_val
elif pscale<0:
if vscale>0:
voltnew=volt-random_val
else:
voltnew=volt+random_val
else:
voltnew=volt
pval=pvalnew
print('Pmax = '+str(round(pval,3))+'%'+' ',end='\r') #prints the power
sourcemeter.shutdown() #shuts down source at end of measurement
tempdict = {
'Measurement_Number' : meas_num,
'Voltage' : voltages,
'Currents' : currents,
'current_stds' : current_stds,
'powers' : powers
} #put arrays of values in a temporary dictionary to save data
data=
|
pd.DataFrame(tempdict)
|
pandas.DataFrame
|
#Copyright July 2021 Ontocord LLC. Licensed under Apache v2 https://www.apache.org/licenses/LICENSE-2.0
#datastore.py
from collections.abc import Iterable
from dataclasses import dataclass, field, fields
from typing import Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from datasets.info import DatasetInfo
from datasets.features import PandasArrayExtensionArray, PandasArrayExtensionDtype, Features, Value, cast_to_python_objects, pandas_types_mapper
from datasets import utils, Dataset
from datasets.splits import NamedSplit
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence
import os
import json
from pathlib import Path
from datasets.utils.typing import PathLike
from datasets.arrow_dataset import transmit_format# , replayable_table_alteration
#from transformers import PreTrainedModel, PretrainedConfig
import copy
import shutil
from datasets.fingerprint import (
fingerprint_transform,
generate_fingerprint,
generate_random_fingerprint,
get_temporary_cache_files_directory,
is_caching_enabled,
update_fingerprint,
)
from datasets.dataset_dict import DatasetDict
from torch import nn
import pickle
import glob, shutil, os, time
import indexed_gzip as igzip
#import zstandard, io
#from gzip_stream import GZIPCompressedStream
import fsspec.compression
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
import dataset
import six
from six.moves.urllib.parse import parse_qs, urlparse
from datastore_utils import *
### NOTE: dataset is a different package than datasets. We are using both packages.
### We want to have multiple types of storage that ideally can be
### transported as a file transfer with an arrow dataset. So if we
### have <signature>.arrow, we may have fts_<signature>.db (for full
### text indexing) and db_<signature>.db (sqlite database), and
### <signature>.mmap (an mmap file representing a tensor), and
### <signature>.igz (if we wish to store some portion of the text
### columns in igzip format for compression and legacy purposes).
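### For example (illustrative only): a datastore whose arrow file is abc123.arrow could be
### accompanied by fts_abc123.db, db_abc123.db, abc123.mmap and abc123.igz.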
class FeaturesWithViews(Features):
def copy(self):
ret= FeaturesWithViews(super().copy())
if hasattr(self, "features_map"):
ret.features_map = copy.deepcopy(self.features_map)
return ret
def __repr__(self):
ret = "{"+"\n".join([f"'{a[0]}': {a[1]}" for a in self.items() if a[0] not in self.features_map])
if self.features_map:
ret = ret+"\n"+"\n".join(f"'{a[0]}': View({a[1]})" for a in self.features_map.items())
ret +="}"
return ret
class Datastore(Dataset): #, dict
"""
A class that wraps a Huggingface arrow based Dataset to provide some optimized reading and *writing* in various persistence backends.
Currently provides support for columns bound to memmap, igzip file, and sqlalchemy databases.
"""
@property
def features(self):
ret = FeaturesWithViews(self._info.features)
ret.features_map = {} if not hasattr(self, "features_map") else self.features_map
return ret
def __repr__(self):
return f"Datastore({{\n features: {list(self.features.keys())},\n num_rows: {self.num_rows}\n}})"
@classmethod
def from_dataset(cls, dataset, features_map=None, shared_dir=None):
self = cls(
arrow_table=dataset._data,
indices_table=dataset._indices,
info=dataset._info,
split=dataset._split,
fingerprint=dataset._fingerprint,
)
if hasattr(dataset, "mmap_access_cnt"):
self.mmap_access_cnt=dataset.mmap_access_cnt
else:
self.mmap_access_cnt=0
if hasattr(dataset, "features_map"):
self.features_map=copy.deepcopy(dataset.features_map)
if features_map is not None:
self.features_map = copy.deepcopy(features_map)
if not hasattr(self, "features_map"):
self.features_map = {}
if hasattr(dataset, "shared_dir"):
self.shared_dir=shared_dir
if shared_dir is not None:
self.shared_dir = shared_dir
if not hasattr(self, "shared_dir"):
self.shared_dir = {}
return self
def _get_mmap(self, mmap_file_path, dtype, shape):
if shape[0] < len(self):
shape[0] = len(self)
# what happens when the datastore shrinks??
if os.path.exists(mmap_file_path):
ret= np.memmap(filename=mmap_file_path, mode="r+", dtype=np.dtype(dtype), shape=tuple(shape))
else:
ret = np.memmap(filename=mmap_file_path, mode="w+", dtype=np.dtype(dtype), shape=tuple(shape))
if self.mmap_access_cnt % 100==0: #let's flush intermittently just in case the OS needs to synch.
ret.flush()
self.mmap_access_cnt=0
self.mmap_access_cnt+=1
return ret
# we use class variables because we don't want it serialized in an instance of this dataset
igzip_fobj = {}
def _get_igzip_fobj(self, file_path):
if file_path in Datastore.igzip_fobj:
return Datastore.igzip_fobj[file_path]
Datastore.igzip_fobj[file_path] = fobj = get_igzip_obj(file_path)
return fobj
# we use class variables because we don't want it serialized in this instance
db_table = {}
db_connection = {}
def _get_db_table(self, table_name, connection_url):
if (table_name, connection_url) in Datastore.db_table:
table = Datastore.db_table[(table_name, connection_url)]
else:
if connection_url in Datastore.db_connection:
db = Datastore.db_connection[connection_url]
else:
Datastore.db_connection[connection_url] = db = DatabaseExt(connection_url)
Datastore.db_table[(table_name, connection_url)] = table = db[table_name]
return table
# todo, change the id key from "id" to something custom. this needs to be stored in the table meta-data.
@staticmethod
def _add_idx(batch, indices, idx,):
batch[idx] = indices # will this be shuffled if we are in shuffled mode?
return batch
#mapping a column to a memmap array accessed by row
def set_mmap_feature_view(self, feature_view, shape, mmap_path=None, dtype='float32', dtype_str_len=1000, idx_column="id", batch_size=100000, num_proc=4, map_fn=None):
dataset_path = os.path.dirname(self.cache_files[0]['filename'])
if mmap_path is None:
mmap_path = os.path.abspath(os.path.join(dataset_path, feature_view+".mmap"))
shape = list(shape)
shape[0] = len(self)
if idx_column not in self.features:
self = self.map(Datastore._add_idx, with_indices=True, batch_size=batch_size, batched=True, num_proc=num_proc, fn_kwargs={'idx': idx_column})
if not isinstance(dtype, str):
dtype =np.dtype(dtype).name
self.features_map[feature_view] = {'type':"mmap", 'path': mmap_path, 'dtype': dtype, 'shape': shape}
return self
#mapping a column to an indexed gzip file accessed by line
def set_igzip_feature_view(self, feature_view, path, idx_column="id", batch_size=100000, num_proc=4, map_fn=None):
fobj = self._get_igzip_fobj(path)
if idx_column not in self.features:
self = self.map(Datastore._add_idx, with_indices=True, batch_size=batch_size, batched=True, num_proc=num_proc, fn_kwargs={'idx': idx_column})
if len(fobj) > len(self):
self.add_item({idx_column: range(len(self), len(fobj))})
self.features_map[feature_view] = {'type':"igzip", 'path': path}
return self
# mapping columns to a sql database. creates a sqlalchemy/dataset table dynamically with idx_column as the primary key.
def set_sql_feature_view(self, table_name, connection_url, columns=None, idx_column="id", batch_size=100000, num_proc=4, map_fn=None):
table = self._get_db_table(table_name, connection_url)
if table.columns:
columns = table.columns
elif not columns:
raise RuntimeError(f"No column definition for table view {table_name}")
if idx_column not in self.features:
self = self.map(Datastore._add_idx, with_indices=True, batch_size=batch_size, batched=True, num_proc=num_proc, fn_kwargs={'feature_view': idx_column})
if len(table) > len(self):
self.add_item({idx_column: range(len(self), len(table))})
for col in columns:
if col == idx_column:
continue
if col in self.features:
raise RuntimeError(f"Column {col} already in the dataset")
self.features_map[col] = {'type':'sql', 'connection_url': connection_url, 'table_name': table_name, 'column': col}
return self
# note that while the id column corresponds to an index into an external storage, accessing an arrow dataset by index
# will not be guaranteed to get the corresponding id. a[0] will return the first item in the current subset of the dataset.
# but a[0] may return {'id': 10, 'mmap_embed': <array corresponding to the 10th location in the mmap file>}
def _getitem(
self,
key: Union[int, slice, str], # should this be list as well??
format_type=None,
format_columns=None,
output_all_columns=False,
format_kwargs=None,
) -> Union[Dict, List]:
# assuming we do error checking re format_columns and output_all_columns at a higher level??
format_columns = copy.copy(format_columns)
# this is the case where we are not getting any views.
if (not hasattr(self, "features_map")) or (hasattr(self, "features_map") and not self.features_map) or (hasattr(self, "features_map") and type(key) is str and key not in self.features_map):
return super()._getitem(
key,
format_type=format_type,
format_columns=format_columns,
output_all_columns=output_all_columns,
format_kwargs=format_kwargs)
# this is the case where there are more than one columns, some of which might
# be an arrow column and at least one view. For the view, we need to also get the "id".
# let's prepare the parameters to get just the arrow portion of the dataset
orig_key = key
if type(key) is str:
if not format_columns:
format_columns = [key]
else:
format_columns.append(key)
if key in self.features_map:
key = "id"
missing=[]
if format_columns:
for c in copy.copy(format_columns):
if c in self.features_map:
missing.append(c)
format_columns.remove(c)
if "id" not in format_columns:
format_columns.append("id")
else:
missing.append("id")
# let's get the data that is in the arrow data first, including the id
outputs = super()._getitem(
key,
format_type=format_type,
format_columns=format_columns,
output_all_columns=output_all_columns,
format_kwargs=format_kwargs)
# this is the case where we are only getting view data, so the only arrow data returned is the 'id'.
# so we need the id column identified so we can index into the view data source.
if type(outputs) in (np.ndarray, list):
outputs = {'id': outputs}
# do some cleanup.
if type(orig_key) is str and format_columns and "id" in format_columns:
format_columns.remove("id")
if format_columns is not None:
format_columns.extend(missing)
# now get the views and combine views and arrow data
return self._format_views(outputs, format_columns=format_columns, format_type=format_type,
output_all_columns=output_all_columns, format_kwargs=format_kwargs)
def _format_views(self,
outputs_or_keys,
format_type=None,
format_columns=None,
output_all_columns=False,
format_kwargs=None):
def getitems(self, outputs, keys, contiguous, start, end, format_columns, output_all_columns, mmap_by_items):
if not format_columns:
items = list(self.features_map.items())
else:
items = [(column, self.features_map[column]) for column in format_columns if column in self.features_map]
sql_results = {}
for feature, val in items:
if val['type'] == 'mmap':
if mmap_by_items:
if contiguous:
outputs[feature] = [ self._get_mmap(val['path'], val['dtype'], val['shape']) for i in range(start, end)]
else:
outputs[feature] = [ self._get_mmap(val['path'], val['dtype'], val['shape']) for i in keys]
else:
if contiguous:
outputs[feature] = self._get_mmap(val['path'], val['dtype'], val['shape'])[start:end]
else:
outputs[feature] = self._get_mmap(val['path'], val['dtype'], val['shape'])[keys]
elif val['type'] == 'igzip':
if contiguous:
outputs[feature] = self._get_igzip_fobj(val['path'])[start:end]
else:
outputs[feature] = self._get_igzip_fobj(val['path'])[keys]
elif val['type'] == 'sql':
sql_results[(val['table_name'], val['connection_url'])] = sql_results.get((val['table_name'], val['connection_url']),[])+[feature]
for table_connection, features in sql_results.items():
table_name, connection_url = table_connection
table= self._get_db_table(table_name, connection_url)
if contiguous:
for row in table.find((table.id, 'between', (start, end)), _columns=features):
for feature in features:
outputs[feature] = outputs.get(feature,[]) + [row[feature]]
else:
for row in table.find((table.id, 'in', keys), _columns=features):
for feature in features:
outputs[feature] = outputs.get(feature,[]) + [row[feature]]
return outputs
format_kwargs = format_kwargs if format_kwargs is not None else {}
format_columns = format_columns if format_columns is not None else []
start = end = 0
contiguous = False
if format_type in ("custom", "torch", "tensorflow", None) and type(outputs_or_keys) is not pd.DataFrame:
transform = format_kwargs.get('transform')
if isinstance(outputs_or_keys, str):
keys = slice(0, len(self))
outputs = {}
contiguous=True
elif isinstance(outputs_or_keys, slice):
keys = outputs_or_keys
outputs = {}
contiguous=True
elif isinstance(outputs_or_keys, dict):
keys = outputs_or_keys["id"]
outputs = outputs_or_keys
else:
keys = outputs_or_keys
outputs = {}
if not contiguous:
if isinstance(keys, int):
contiguous = False
else:
contiguous, start, end = is_contiguous(keys)
else:
if isinstance(keys, slice):
start = 0 if keys.start is None else keys.start
end = len(self) if keys.stop is None else keys.stop
else:
start = keys[0]
end = keys[-1]+1
outputs = getitems(self, outputs, keys, contiguous, start, end, format_columns, output_all_columns, mmap_by_items=False)
if transform is not None:
outputs = transform(outputs)
if "id" in outputs and format_columns and "id" not in format_columns: del outputs["id"]
# is this right. will custom ever return a dict type if there is only one column, or do we
# default to returning the only column.
if len(outputs) == 1: outputs = list(outputs.values())[0]
if format_type == "torch":
import torch
return torch.tensor(outputs, **format_kwargs)
elif format_type == "tensorflow":
import tensorflow
return tensorflow.ragged.constant(outputs, **format_kwargs)
else:
return outputs
elif format_type == "pandas" or isinstance(outputs_or_keys, pd.DataFrame):
# do we do transforms for this case??
if isinstance(outputs_or_keys, str):
start = 0
end = len(self)
keys = range(start, end)
outputs = None
contiguous=True
elif isinstance(outputs_or_keys, slice):
start = 0 if outputs_or_keys.start is None else outputs_or_keys.start
end = len(self) if outputs_or_keys.stop is None else outputs_or_keys.stop
keys = range(start, end)
outputs = None
contiguous=True
elif isinstance(outputs_or_keys, dict) or isinstance(outputs_or_keys, pd.DataFrame):
outputs = outputs_or_keys
outputs = pd.DataFrame(outputs)
keys = outputs_or_keys["id"]
contiguous, start, end = is_contiguous(keys)
else:
raise RuntimeError("got unknown outputs or keys type")
if outputs is None:
outputs = pd.DataFrame()
outputs = getitems(self, outputs, keys, contiguous, start, end, format_columns, output_all_columns, mmap_by_items=True)
if "id" in outputs and format_columns and "id" not in format_columns:
outputs = outputs.drop("id", axis=1)
if len(format_columns) == 1:
outputs = outputs[format_columns[0]]
return outputs
raise RuntimeError("got unknown outputs or keys type")
def to_csv(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**to_csv_kwargs,
) -> int:
pass
def to_dict(self, batch_size: Optional[int] = None, batched: bool = False) -> Union[dict, Iterator[dict]]:
if (not hasattr(self, "features_map") or not self.features_map) and len(self.features) == 1 and "id" in self.features:
return {}
#TODO - put back direct mmap access method here?
ret = super().to_dict(batch_size=batch_size, batched=batched)
if isinstance(ret, Iterator):
for r in ret:
yield self._format_views(r, contiguous=True)
return
return self._format_views(ret, contiguous=True)
def to_pandas(
self, batch_size: Optional[int] = None, batched: bool = False
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if (not hasattr(self, "features_map") or not self.features_map) and len(self.features) == 1 and "id" in self.features:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from io import BytesIO, StringIO
from google.cloud import storage
import tensorflow as tf
import numpy as np
import os
import argparse
PROJECT = 'loan-delinq-kubeflow'
BUCKET = 'loan-delinq-bucket'
def obtain_train_eval(PROJECT, BUCKET):
# # All of the data is in a file called Step10_Final_dataset.csv
print('reading the data file from gcs...')
######################################################################
######################################################################
### Datalab doesn't support ` from google.cloud import storage ` so if not using
### datalab, use the following (uncomment the following line) to import data
### differently
# # The following was derived from the contents of this reply: https://stackoverflow.com/a/50201179
storage_client = storage.Client(project=PROJECT, credentials=None)
bucket = storage_client.get_bucket(BUCKET)
blob = bucket.blob('input/Step10_Final_dataset.csv')
byte_stream = BytesIO()
blob.download_to_file(byte_stream)
byte_stream.seek(0)
df = pd.read_csv(byte_stream)
# # # We need to rearrange the columns below into exactly the order the estimator expects
print('rearranging data...')
KEY_COLUMN = 'LOAN_SEQUENCE_NUMBER'
LABEL_COLUMN = 'TARGET'
bool_cols = []
int_cols = ['credit_score', 'mortgage_insurance_percentage', 'Number_of_units', 'cltv', 'original_upb',
'ltv', 'original_loan_term', 'number_of_borrowers','min_CURRENT_DEFERRED_UPB']
str_cols = ['first_time_home_buyer_flag', 'occupancy_status', 'channel', 'property_state',
'property_type', 'loan_purpose', 'seller_name', 'service_name']
str_nuniques = [2, 3, 3, 52, 5, 2, 20, 24]
float_cols = ['metropolitan_division', 'original_interest_rate', 'min_CURRENT_ACTUAL_UPB', 'max_CURRENT_ACTUAL_UPB',
'Range_CURRENT_ACTUAL_UPB', 'stdev_CURRENT_ACTUAL_UPB', 'mode_CURRENT_ACTUAL_UPB', 'average_CURRENT_ACTUAL_UPB',
'max_CURRENT_DEFERRED_UPB', 'Range_CURRENT_DEFERRED_UPB', 'mode_CURRENT_DEFERRED_UPB', 'average_CURRENT_DEFERRED_UPB',
'stdev_CURRENT_DEFERRED_UPB', 'min_CURRENT_INTEREST_RATE', 'max_CURRENT_INTEREST_RATE', 'Range_CURRENT_INTEREST_RATE',
'mode_CURRENT_INTEREST_RATE', 'stdev_CURRENT_INTEREST_RATE', 'average_CURRENT_INTEREST_RATE',
'PREFINAL_LOAN_DELINQUENCY_STATUS', 'frequency_0', 'frequency_1', 'frequency_2', 'frequency_3',
'Recency_0', 'Recency_1', 'Recency_2', 'Recency_3']
DEFAULTS = [[''] for col in bool_cols] + [[0] for col in int_cols] + [[0.0] for col in float_cols] + \
[[''] for col in str_cols] + [[''],[0]]
CSV_COLUMNS = bool_cols + int_cols + float_cols + str_cols + [KEY_COLUMN,LABEL_COLUMN]
traindata = df[CSV_COLUMNS]
# # Here, we'll split with a small test size so as to allow our model to train on more data
print('splitting...')
X_train, X_test, y_train, y_test = train_test_split(
traindata.drop(LABEL_COLUMN, axis=1), traindata[LABEL_COLUMN],
stratify=traindata[LABEL_COLUMN], shuffle=True, test_size=0.1)
traindf = pd.concat([X_train, y_train], axis=1)
evaldf = pd.concat([X_test, y_test], axis=1)
alld =
|
pd.concat([traindf, evaldf])
|
pandas.concat
|
import unittest
from unittest.mock import MagicMock
import pandas as pd
from pandas.testing import assert_frame_equal
from data_export.models import DATA
from data_export.pipeline.formatters import (
DictFormatter,
FastTextCategoryFormatter,
JoinedCategoryFormatter,
ListedCategoryFormatter,
RenameFormatter,
TupledSpanFormatter,
)
TARGET_COLUMN = "labels"
class TestDictFormatter(unittest.TestCase):
def setUp(self):
self.return_value = {"label": "Label"}
label = MagicMock()
label.to_dict.return_value = self.return_value
self.dataset = pd.DataFrame([{TARGET_COLUMN: [label]}])
def test_format(self):
formatter = DictFormatter(TARGET_COLUMN)
dataset = formatter.format(self.dataset)
expected_dataset = pd.DataFrame([{TARGET_COLUMN: [self.return_value]}])
assert_frame_equal(dataset, expected_dataset)
class TestJoinedCategoryFormatter(unittest.TestCase):
def setUp(self):
self.return_value = "Label"
label = MagicMock()
label.to_string.return_value = self.return_value
self.dataset =
|
pd.DataFrame([{TARGET_COLUMN: [label]}])
|
pandas.DataFrame
|
import os
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
curr_dir = os.getcwd()
project_result_dir = '\\project_result'
full_project_result_dir = curr_dir+project_result_dir+'\\'
input_filename_core = 'disease_model_merged_data_vFinal_p'
output_filename_core = 'Graph'
start_prediction_date = datetime.datetime(2020, 5, 26) # Setting
def main(graph_switch=False, save_switch=False):
pandas_output_setting()
cols = {'Predicted cumulative count of infected cases in Alberta (Demo only)': 'cumulative_cases',
'Predicted cumulative count of deaths in Alberta (Demo only)': 'cumulative_deaths'
}
df_master = get_merged_project_data()
df_master['Date'] = pd.to_datetime(df_master['Date'])
df_id_list = df_master['Run'].unique().tolist()
if graph_switch:
for key, col_label in cols.items():
# Draw graphs
fig, ax = plt.subplots()
for df_id in df_id_list:
df_master_sliced = df_master[df_master['Run'] == df_id]
row_index = df_master_sliced[df_master_sliced['Date'] == \
start_prediction_date].index.item()
ax.plot(df_master_sliced['Date'][row_index:], df_master_sliced[col_label][row_index:])
ax.plot(df_master_sliced['Date'][:row_index+1], df_master_sliced[col_label][:row_index+1])
# Set formats and labels
date_format = DateFormatter('%Y-%m')
ax.xaxis.set_major_formatter(date_format)
ax.xaxis.set_minor_formatter(date_format)
ax.set_title('Title: {}'.format(key), fontsize=12)
plt.xlabel('Time in Year and Month')
plt.ylabel('Count')
plt.figtext(0.99, 0.005, '(Simulated predictions started: {})'.format(start_prediction_date),
horizontalalignment='right')
fig.autofmt_xdate()
if save_switch:
output_file_name = '{}{}_{}.png'.format(full_project_result_dir, output_filename_core, key)
plt.savefig(output_file_name)
plt.show()
### Helper functions ###
def pandas_output_setting():
'''Set pandas output display setting'''
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 170)
pd.options.mode.chained_assignment = None # default='warn'
def get_merged_project_data():
dfs_merged = None
for filename in os.listdir(path=full_project_result_dir):
if (input_filename_core in filename):
path = full_project_result_dir+filename
df = pd.read_csv(path, encoding='utf-8', low_memory=False)
if dfs_merged is None:
dfs_merged = df
else:
dfs_merged =
|
pd.concat([dfs_merged, df])
|
pandas.concat
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, _MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D', periods=10)),
pd.DataFrame(np.random.randn(10, 5),
columns=list('abcde')).set_index(['a', 'b'])]:
if LooseVersion(pyspark.__version__) < LooseVersion('2.4'):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
else:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
pidx = self.pdf.set_index('b', append=True).index
kidx = self.kdf.set_index('b', append=True).index
if LooseVersion(pyspark.__version__) < LooseVersion('2.4'):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
else:
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx =
|
pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
|
pandas.Index
|
from collections import defaultdict
from pandas import Series, pivot_table, DataFrame, notnull
from typing import List, Dict, Tuple, Union, Callable, Optional
from ux.actions.user_action import UserAction
from ux.sequences.action_sequence import ActionSequence
from ux.actions.action_template import ActionTemplatePair
from ux.compound_types import StrPair
def count_action_transitions(
action_sequences: List[ActionSequence]
) -> Dict[ActionTemplatePair, int]:
"""
Count the transitions from each action to each other action in the given
sequences.
:param action_sequences: List of IActionSequence to count transitions in.
:return: Dictionary of {(from, to) => count}
"""
transitions = defaultdict(int)
# count transitions
for sequence in action_sequences:
for a in range(len(sequence) - 1):
from_action = sequence[a].template()
to_action = sequence[a + 1].template()
transitions[(from_action, to_action)] += 1
return transitions
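# Illustrative example (hypothetical templates): for a single sequence whose action
# templates are [A, B, A, B], count_action_transitions returns {(A, B): 2, (B, A): 1}.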
def count_location_transitions(
action_sequences: List[ActionSequence]
) -> Dict[StrPair, int]:
"""
Count the transitions from each location to each other location in actions
in the given sequences.
:param action_sequences: List of IActionSequence to count transitions in.
"""
transitions = defaultdict(int)
# count transitions
for sequence in action_sequences:
for action in sequence:
source = action.source_id
target = action.target_id
if notnull(source) and
|
notnull(target)
|
pandas.notnull
|
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / freqency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax =
|
Index(obj[key],name=key)
|
pandas.core.index.Index
|
import sys
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine
# import sqlite3
def load_data(messages_filepath, categories_filepath):
'''
Function to load data and merge them into one file
Args:
messages_filepath: Filepath to load the messages.csv
categories_filepath: Filepath to load the categories.csv
Output:
df: combined dataFrame
'''
messages = pd.read_csv(messages_filepath)
categories=pd.read_csv(categories_filepath)
df = messages.merge(categories,how='outer',on=['id'])
return df
def clean_data(df):
'''
Function to clean the combined DataFrame and have columns for each category with binary inputs
Args: df: Merged dataFrame
Output:
df : Clean dataFrame
'''
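# For example (illustrative): a categories string like "related-1;request-0;offer-0"
# expands into columns related, request and offer holding the values 1, 0 and 0.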
categories = pd.DataFrame(df['categories'].str.split(';',expand=True))
row = categories.iloc[0,:]
category_colnames =row.apply(lambda x:x[:-2])
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda x:str(x)[-1])
# convert column from string to numeric
categories[column] = categories[column].apply(lambda x:int(x))
#converting categories into binary format of 1's and 0's
categories=categories.apply(lambda x:(x!=0).astype(int))
# drop the original categories column from `df`
df=df.drop(['categories'],axis=1,inplace=False)
# concatenate the original dataframe with the new `categories` dataframe
df =
|
pd.concat([df,categories],axis=1)
|
pandas.concat
|
import base64
import numpy as np
import os
import pandas as pd
import streamlit as st
from streamlit.uploaded_file_manager import UploadedFile
import streamlit.components.v1 as components
import json
from datetime import datetime
from pathlib import Path
from .repo import get_all_commits
DATE_COLUMN = 'last_updated'
### COINGECKO
@st.experimental_memo
def load_coingecko_data():
# coin_file_path = "streamlit_app/coin_info/coin_socials.json"
coin_file_path = './data/coin_socials.json'
pkl_path = './data/merged_on_name_cg_agg.pkl'
print(f"reading file from {coin_file_path}")
try:
with open(coin_file_path, "r") as file:
jj = json.load(file)
coin_social_data_df = pd.DataFrame.from_dict(jj,orient = "index")
print(f"Read df with {coin_social_data_df.shape} rows ")
# lowercase = lambda x: str(x).lower()
# data.rename(lowercase, axis='columns', inplace=True)
coin_social_data_df[DATE_COLUMN] = pd.to_datetime(coin_social_data_df[DATE_COLUMN])
# Add in repo info from electricCapital
additional_repo_info_df = pd.read_pickle(pkl_path)
# st.dataframe(additional_repo_info_df)
# print("Shape")
coin_social_data_df_merged = pd.merge(coin_social_data_df, additional_repo_info_df, left_on = 'name', right_on= 'name_coingecko', how = 'left')
return coin_social_data_df_merged
except Exception as e:
# Notify the reader that the data failed to load.
st.sidebar.error('Error loading data :(')
return None
@st.experimental_memo
def get_one_token_latest(coin_choice):
import pandas as pd
from pycoingecko import CoinGeckoAPI
import time
pkl_path = './data/merged_on_name_cg_agg.pkl'
# coin_data = {}
cg = CoinGeckoAPI()
coin_json = cg.get_coin_by_id(id=coin_choice, localization = False)
required_cols = ['id', 'symbol', 'name', 'asset_platform_id', 'platforms',
'block_time_in_minutes', 'hashing_algorithm', 'categories',
'public_notice', 'additional_notices', 'description', 'links', 'image',
'country_origin', 'genesis_date', 'sentiment_votes_up_percentage',
'sentiment_votes_down_percentage', 'market_cap_rank', 'coingecko_rank',
'coingecko_score', 'developer_score', 'community_score',
'liquidity_score', 'public_interest_score', 'market_data',
'community_data', 'developer_data', 'public_interest_stats',
'status_updates', 'last_updated', 'tickers', 'ico_data',
'contract_address']
df = pd.DataFrame.from_dict({coin_choice: coin_json}, orient="index")
for col in required_cols:
if col not in df.columns:
df[col] = np.nan
additional_repo_info_df = pd.read_pickle(pkl_path)
# st.dataframe(additional_repo_info_df)
# print("Shape")
coin_social_data_df_merged =
|
pd.merge(df, additional_repo_info_df, left_on = 'name', right_on= 'name_coingecko', how = 'left')
|
pandas.merge
|
import numpy as np
from typing import Callable, Tuple
from dataprocessing import F1Dataset
import pandas as pd
import GPy
import datetime
processed_laps = dict()
processed_pits = dict()
def get_or_load_data(year: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
# return data if it was already processed
global processed_laps
global processed_pits
if year in processed_pits:
print("already processed")
return processed_laps[year], processed_pits[year]
data = F1Dataset('data')
races = data.races
years_races = races.loc[races['year'] == year][['raceId', 'circuitId']]
# load qualification data, obtain fastest quali time at each race for normalisation purposes
qualis = data.qualifying
qrs = qualis.merge(years_races, on='raceId')
qrs = qrs.loc[~pd.isnull(qrs['q3'])]
top_time_idx = qrs.groupby(['raceId'])['q3'].transform(min) == qrs['q3']
top_times = qrs[top_time_idx][['q3', 'raceId']]
top_times['q3'] =
|
pd.to_datetime(top_times['q3'], format='%M:%S.%f')
|
pandas.to_datetime
|
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import sys
from math import sqrt, log
from scipy.stats import zscore
from scipy.stats import percentileofscore
from collections import Counter
def get_indication(df):
indication =[]
for sample in df.index:
name = sample[sample.rfind('IPI')+3:sample.find('.')-3]
if name == 'MELB':
name = 'MEL'
indication.append(name.strip())
return indication
def make_plot(s,title,MIN,MAX,comp):
'''
input: series of flow scores
Min Max for y axis
output: boxplot of flow scores by indication sorted for highest median to lowest
'''
df = s.to_frame(name='Population_Percent')
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(20,10))
indication = get_indication(df)
print (len(indication))
colors = {
'LUNG':'MidnightBlue',
'KID':'Red',
'HNSC':'OliveDrab',
'CRC':'DarkTurquoise',
'GYN':'Coral',
'BLAD':'Gold',
'MEL':'Maroon',
'HEP':'DodgerBlue',
'SRC':'DeepPink',
'PNET':'DarkOrchid',
'PDAC' : 'Yellow',
'GALL':'Green',
'ADR':'Purple',
'GSTR':'Yellow',
'SI' : 'Black',
'GBM': 'Grey'
}
df['Indication'] = indication
my_order = df.groupby(by=["Indication"]).median().iloc[::-1]
my_order = my_order.sort_values(by=["Population_Percent"],ascending=False).index
print (my_order.tolist())
ax = sns.boxplot(x="Indication", y="Population_Percent", data=df, whis=np.inf,palette=colors,order=my_order,saturation=0.45)
ax = sns.swarmplot(x='Indication',y='Population_Percent',data=df, size =10, palette=colors,order=my_order,edgecolor='gray')
plt.ylabel('Score',fontsize= 30, fontweight='bold')
plt.xlabel('')
plt.ylim((MIN,MAX))
plt.setp(ax.yaxis.get_majorticklabels(), fontsize=15, fontweight='bold',rotation=0)
plt.setp(ax.xaxis.get_majorticklabels(), fontsize=25, fontweight='bold',rotation=0)
plt.title(comp+' score from Flow Cytometry' ,fontsize=20,fontweight='bold')
#plt.show()
plt.savefig(title+'.pdf',format='pdf',dpi=500)
plt.savefig(title+'.svg',format='svg',dpi=500)
plt.close()
return my_order
def main():
title="Flow_score_Sept10_2020"
for comp in ['Tcell','Myeloid','Stroma']:
df =
|
pd.read_csv('../files_used_for_plots/feature_flow_population_percents/'+comp+'_flow_population_percents.tsv',sep='\t',index_col=[0],header=0,skipinitialspace=True)
|
pandas.read_csv
|
"""Script to get data for core functions."""
from BackEnd import core
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
sns.set_theme(color_codes=True)
# time it takes creating share packages with variable secret length
number_of_bytes = range(1, 50)
number_of_packages = 5
number_threshold = 5
data_points = None
for i in range(0, len(number_of_bytes)):
mock_secret = os.urandom(number_of_bytes[i])
start = timer()
core.split_large_secret_into_share_packages(mock_secret, number_threshold, number_of_packages)
stop = timer()
if i == 0:
data_points = np.array([[number_of_bytes[i], stop - start]])
else:
data_points = np.append(data_points, np.array([[number_of_bytes[i], stop - start]]), axis=0)
print(pd.DataFrame(data_points))
dataframe = pd.DataFrame(data=data_points[:, 1:])
sns.lineplot(data=dataframe)
plt.title('Large Secret: Time per length of secret in bytes.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Bytes')
plt.show()
x = data_points[0:, 0]
y = data_points[0:, 1]
x, y = pd.Series(x), pd.Series(y)
sns.regplot(x=x, y=y)
plt.title('Large Secret: Time per length of secret in bytes.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Bytes')
plt.show()
# time it takes creating share packages with variable package number
number_of_bytes = 32
number_threshold = 5
number_of_packages = range(2, 100)
data_points = None
for i in range(0, len(number_of_packages)):
mock_secret = os.urandom(number_of_bytes)
start = timer()
core.split_large_secret_into_share_packages(mock_secret, number_threshold, number_of_packages[i])
stop = timer()
if i == 0:
data_points = np.array([[number_of_packages[i], stop - start]])
else:
data_points = np.append(data_points, np.array([[number_of_packages[i], stop - start]]), axis=0)
print(pd.DataFrame(data_points))
dataframe = pd.DataFrame(data=data_points[:, 1:])
sns.lineplot(data=dataframe)
plt.title('Large Secret: Time per number of packages created.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Packages')
plt.show()
x = data_points[0:, 0]
y = data_points[0:, 1]
x, y = pd.Series(x), pd.Series(y)
sns.regplot(x=x, y=y)
plt.title('Large Secret: Time per number of packages created.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Packages')
plt.show()
plt.show()
# time it takes creating share packages with variable threshold number
number_of_bytes = 32
number_threshold = range(2, 100)
number_of_packages = 5
data_points = None
for i in range(0, len(number_threshold)):
mock_secret = os.urandom(number_of_bytes)
start = timer()
core.split_large_secret_into_share_packages(mock_secret, number_threshold[i], number_of_packages)
stop = timer()
if i == 0:
data_points = np.array([[number_threshold[i], stop - start]])
else:
data_points = np.append(data_points, np.array([[number_threshold[i], stop - start]]), axis=0)
print(pd.DataFrame(data_points))
dataframe = pd.DataFrame(data=data_points[:, 1:])
sns.lineplot(data=dataframe)
plt.title('Large Secret: Time per threshold of shamir algorithm.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Threshold')
plt.show()
x = data_points[0:, 0]
y = data_points[0:, 1]
x, y = pd.Series(x), pd.Series(y)
sns.regplot(x=x, y=y, color="g")
plt.title('Large Secret: Time per threshold of shamir algorithm.')
plt.legend('Data')
plt.ylabel('Time')
plt.xlabel('Threshold')
plt.show()
# time it takes to restore packages from minimum threshold
number_of_bytes = 32
number_threshold = range(2, 50)
number_of_packages = 100
data_points = None
for i in range(0, len(number_threshold)):
mock_secret = os.urandom(number_of_bytes)
packages = core.split_large_secret_into_share_packages(mock_secret, number_threshold[i], number_of_packages)
start = timer()
core.recover_large_secret(packages)
stop = timer()
if i == 0:
data_points = np.array([[number_threshold[i], stop - start]])
else:
data_points = np.append(data_points, np.array([[number_threshold[i], stop - start]]), axis=0)
print(
|
pd.DataFrame(data_points)
|
pandas.DataFrame
|
import os
try:
import fool
except:
print("็ผบๅฐfoolๅทฅๅ
ท")
import math
import pandas as pd
import numpy as np
import random
import tensorflow as tf
import re
np.random.seed(1)
def add2vocab(path,word):
vocab_data=pd.read_csv(path)
idx_to_chars=list(vocab_data['vocabulary'])+[word]
df_data = pd.DataFrame(idx_to_chars, columns=['vocabulary'])
df_data.to_csv(path,index=0)
def get_corpus_indices(data,chars_to_idx,mlm=False,nsp=False):
"""
่ฝฌๅๆ่ฏๅบ็ดขๅผ
"""
corpus_indices=[]
keys=chars_to_idx.keys()
#print(data)
for d in data:
if nsp==True:
corpus_chars=d
corpus_chars_idx=[]
if len(d)>0 and len(d[0])==1:
corpus_chars=['[cls]']+corpus_chars
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
corpus_chars[index]='[mask]' # replace words that are not in the vocabulary with [mask]
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
find_end=np.where(np.asarray(corpus_chars_idx)==chars_to_idx['ใ'])
for i in range(find_end[0].shape[0]):
corpus_chars_idx.insert(find_end[0][i]+i+1,chars_to_idx['[sep]'])
else:
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
elif mlm==True:
d=d.replace('\n','').replace('\r','').replace(' ','').replace('\u3000','')
corpus_chars=list(d)
corpus_chars_idx=[]
#print(2)
'''
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
corpus_chars[index]='[mask]' # replace words that are not in the vocabulary with [mask]
'''
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
corpus_chars[index]='[mask]' # replace words that are not in the vocabulary with [mask]
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
else:
corpus_chars=d
if isinstance(corpus_chars,(list)): # corpus_chars must be a list
index=-1
for word in corpus_chars:
index=index+1
if word not in keys:
corpus_chars[index]='[mask]' # replace words that are not in the vocabulary with [mask]
else:
corpus_chars=[corpus_chars] # convert to a list
corpus_chars_idx=[chars_to_idx[char] for char in corpus_chars]
corpus_indices.append(corpus_chars_idx) # corpus indices: the text that was read in, converted to indices via chars_to_idx
return corpus_indices
def data_format(data,labels):
'''
Format the data: pad every sequence in the batch to the length of the longest
sequence in the batch, padding with -1.
'''
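# For example (illustrative): a batch [[3, 7], [2, 9, 4]] is padded to [[3, 7, -1], [2, 9, 4]]
# and the corresponding masks are [[1., 1., 0.], [1., 1., 1.]].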
def format_inner(inputs,max_size):
new_data=[]
for x_t in inputs:
if(abs(len(x_t)-max_size)!=0):
for i in range(abs(len(x_t)-max_size)):
x_t.extend([-1])
new_data.append(tf.reshape(x_t,[1,-1]))
return new_data
max_size=0
new_data=[]
mask=[]
masks=[]
new_labels = []
#get the maximum sequence length in the batch
for x in data:
if(max_size<len(x)):
max_size=len(x)
#build the masks
for d in data:
for i in range(max_size):
if(i<len(d)):
mask.append(1.0)
else:
mask.append(0.0)
masks.append(tf.reshape(mask,[1,-1]))
mask=[]
#print(masks,"max_size")
if data is not None:
new_data=format_inner(data,max_size) # format the data
if labels is not None:
new_labels=format_inner(labels,max_size) # format the labels
#print(new_labels)
#print(new_data)
return new_data,new_labels,masks
def get_data(data,labels,chars_to_idx,label_chars_to_idx,batch_size,char2idx=True,mlm=False,nsp=False):
'''
function:
yield the data one batch at a time
parameter:
data: the samples that need to be batched
labels: the sentiment classes corresponding to data
chars_to_idx: mapping from vocabulary to indices
label_chars_to_idx: mapping from labels to indices
batch_size: batch size
'''
num_example=math.ceil(len(data)/batch_size)
example_indices=list(range(num_example))
random.shuffle(example_indices)
#print(data,"get_data")
for i in example_indices:
start=i*batch_size
if start >(len(data)-1):
start=(len(data)-1)
end=i*batch_size+batch_size
if end >(len(data)-1):
end=(len(data)-1)+1
X=data[start:end]
Y=labels[start:end]
#print(chars_to_idx,"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
#print(char2idx," ",mlm," ",nsp,"got here 1")
if char2idx==True:
#print("่ฟ")
X=get_corpus_indices(X,chars_to_idx,mlm=mlm,nsp=nsp)
if mlm==True:
Y=X
else:
Y=get_corpus_indices(Y,label_chars_to_idx,mlm=mlm,nsp=nsp)
#print(X,"XXXXXX")
yield X,Y # just the indexed text; the sequence lengths still differ
def nsp_vocab(folder,name):
path=folder+"\\"+name
df = pd.read_csv(path)
data = list(df["evaluation"])
#print(len(data))
datas=[]
labels=[]
for i in range(len(data)):
if data[i].find("ใ")==-1:
continue
#print(data[i])
x,y=build_sample_nsp(data[i])
if x==-1:
continue
datas.extend(x)
labels.extend(y)
#print(datas[-1])
#print(labels[-1])
datas=[list(d) for d in datas]
df_data = pd.DataFrame(datas)
    df_data.to_csv(folder+"\\"+"nsp_data.csv",index=0,header=0)  # do not save row or column names
    df_label = pd.DataFrame(labels)
    df_label.to_csv(folder+"\\"+"nsp_label.csv",index=0,header=0)  # do not save row or column names
#print(datas[-1][:])
def nsp_load_data(folder,data_name,label_name):
path_data=folder+"\\"+data_name
path_name=folder+"\\"+label_name
df_data = pd.read_csv(path_data,header=None,low_memory=False)
df_label = pd.read_csv(path_name,header=None,low_memory=False)
data=df_data.values.tolist()
#print(data[0:10][:])
    data=[[d for d in sentence if pd.notna(d)] for sentence in data ]  # drop NaN padding
#print(df_label,"####")
label=df_label.values.tolist()
return data,label
def build_sample_nsp(data):
    '''
    function:
        Build sentence-pair samples; the label says whether the two sentences are
        adjacent in the original text. For example, a document with 3 sentences
        (s1)(s2)(s3) yields positive (adjacent) samples (s1)(s2), (s2)(s3) and
        negative samples (s1)(s3), (s3)(s1), (s3)(s2), (s2)(s1).
        To keep positive and negative counts equal, the returned samples are 50% of each.
    parameter:
        data: one document; a document contains many sentences
    '''
def dw_nsp(sentence):
front=0
back=1
pos_data=[]
pos_label=[]
neg_data=[]
neg_label=[]
if(len(sentence)>1):
while back<len(sentence):
#print(back)
pos_data.append(sentence[front]+"ใ"+sentence[back]+"ใ")
                pos_label.append("ๆญฃ้ข")  # "ๆญฃ้ข" marks a positive sample
back=back+1
front=front+1
#print(len(pos_data),"pos_data")
for i in range(len(pos_data)):
front=random.randint(0,len(sentence)-1)
back=random.randint(0,len(sentence)-1)
#print(back)
while (back-front)==1:
front=random.randint(0,len(sentence)-1)
back=random.randint(0,len(sentence)-1)
#print(back)
neg_data.append(sentence[front]+"ใ"+sentence[back]+"ใ")
            neg_label.append("่ด้ข")  # "่ด้ข" marks a negative sample
return pos_data+neg_data,pos_label+neg_label
    pattern=r'[ใ|๏ผ]'  # split the text on sentence-ending punctuation
#print(data)
corpus=data
sentence=re.split(pattern,corpus)
if len(sentence[-1])==0:
        sentence=sentence[0:-1]  # the last element is empty, so drop it
#print(sentence)
#print(sentence)
if len(sentence)>2:
inputs,labels=dw_nsp(sentence)
#print(inputs)
#print(labels)
        #random.shuffle(zip(inputs,labels))  # shuffling inputs and labels separately would break their alignment
inputs_labels = list(zip(inputs, labels))
random.shuffle(inputs_labels)
inputs[:], labels[:] = zip(*inputs_labels)
return inputs,labels
else:
return -1,-1
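# Usage sketch (assumption, not in the original source): for a document such as
# "ๅฅไธใๅฅไบใๅฅไธใ", build_sample_nsp returns shuffled sentence-pair strings together
# with their "ๆญฃ้ข"/"่ด้ข" labels; documents with fewer than three sentences return (-1, -1).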
def chinese_token_vocab(path):
df = pd.read_csv(path, header=None)
    chinese_token=list(df.iloc[:,-1])+["ใ","๏ผ","๏ผ","๏ผ","ใ","๏ผ","๏ผ","โ","โ","โ","โ","๏ผ","๏ผ","ใ","ใ","โ","โฆ","ยท","ใ","ใ","[","]",".","0","1","2","3","4","5","6","7","8","9"]+['[cls]','[sep]','[mask]']  # add some common Chinese punctuation marks and the special tokens
df_data=pd.DataFrame(chinese_token, columns=['token'])
df_data=pd.DataFrame(df_data['token'].unique(), columns=['token'])
df_data.to_csv("data\\chinese_token.csv",index=0)
def bild_vocab_token(path):
    '''
    Read the token vocabulary file.
    '''
df = pd.read_csv(path)
chinese_token=list(df['token'])
char_to_idx=dict([(char,i) for i,char in enumerate(chinese_token)])
vocab_size=len(chinese_token)
return chinese_token,char_to_idx,vocab_size
def ner_vocab(folder,name):#tokenize
    '''
    func:
        Rebuild the dataset so that each row represents one sentence; the data and
        the labels are written to separate files.
    '''
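    # Assumed input format (not stated explicitly in the source): each line of the raw
    # file is "<char><offset digit>\t<NER label>", e.g. "ๆ0\tO"; offset 0 marks the
    # start of a word ("B") and larger offsets mark continuations ("I").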
name_1="ner_label_num_"+name+".csv"
name_2="ner_data_"+name+".csv"
name_3="ner_label_"+name+".csv"
name_4="tokenize_label_num_"+name+".csv"
name_5="tokenize_label_"+name+".csv"
path = folder+"\\"
df = pd.read_csv(folder+"\\"+name,sep='\t', header=None,encoding='utf-8')
corpus=list(df[0])
corpus_label=list(df[1])
token=[]
tokenize=[]
tokenizes=[]
tokens=[]
label=[]
labels=[]
    # build the dataset: save each token and its corresponding label to a file
for i in range(len(corpus)):
chars=list(corpus[i])
token.append(chars[0])
#print(chars[0],i)
if(int(chars[1])>0):
tokenize.append("I")
else:
tokenize.append("B")
label.append(corpus_label[i])
#tokenize.append(i[1])
        if chars[0]=="ใ":  # a full stop marks the end of a sentence
tokens.append(token)
labels.append(label)
tokenizes.append(tokenize)
#token.clear()
#label.clear()
token=[]
label=[]
tokenize=[]
#data={"token":token,"tokenize":tokenize,"ner_label":ner_label}
#print(tokens)
df_data = pd.DataFrame(tokens)
    df_data.to_csv(path+name_2,index=0,header=0)  # do not save row or column names
    df_label = pd.DataFrame(labels)
    df_label.to_csv(path+name_3,index=0,header=0)  # do not save row or column names
    #print(tokenizes)
    df_label_tokenize = pd.DataFrame(tokenizes)
    df_label_tokenize.to_csv(path+name_5,index=0,header=0)  # do not save row or column names
    # build the label-to-index mapping and save it for reuse
ner_label_num=list(df[1].unique())#+["<START>","<END>"]
df_label = pd.DataFrame(ner_label_num, columns=['label'])
df_label.to_csv(path+name_1,index=0)
    # flatten the nested list into a single list
T=[]
for t in tokenizes:
T.extend(t)
df_label_tnum = pd.DataFrame(list(set(T)), columns=['label'])#+["<START>","<END>"]
df_label_tnum.to_csv(path+name_4,index=0)
def build_vocab_label(folder,name):
df = pd.read_csv(folder+"\\"+name)
#print(df)
try:
label_idx_to_char=list(df["label"].unique())
except:
label_idx_to_char=list(df.iloc[:,0].unique())
label_char_to_idx=dict([(char,i) for i,char in enumerate(label_idx_to_char)])
label_vocab_size=len(label_idx_to_char)
return label_idx_to_char,label_char_to_idx,label_vocab_size
def ner_load_data(folder,name,low_memory=False):
path_1=folder+"\\"+name
df = pd.read_csv(path_1,header=None,low_memory=False)
data=df.values.tolist()
data=[[d for d in sentence if
|
pd.notna(d)
|
pandas.notna
|
import quandl
mydata = quandl.get("YAHOO/INDEX_DJI", start_date="2005-12-01", end_date="2005-12-05")
import pandas as pd
authtoken = '<PASSWORD>'
def get_data_quandl(symbol, start_date, end_date):
data = quandl.get(symbol, start_date=start_date, end_date=end_date, authtoken=authtoken)
return data
def generate_features(df):
""" Generate features for a stock/index based on historical price and performance
Args:
df (dataframe with columns "Open", "Close", "High", "Low", "Volume", "Adjusted Close")
Returns:
dataframe, data set with new features
"""
df_new =
|
pd.DataFrame()
|
pandas.DataFrame
|
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import timedelta, datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class whoop_login:
'''A class object to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
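    # Usage sketch (assumption, not from the original source):
    #   client = whoop_login()
    #   client.get_authorization('user.ini')   # reads username/password from an ini file
    #   daily = client.get_keydata_all()       # one row per day of WHOOP membership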
def __init__(self, auth_code=None, whoop_id=None,current_datetime=datetime.utcnow()):
self.auth_code=auth_code
self.whoop_id=whoop_id
self.current_datetime=current_datetime
self.start_datetime=None
self.all_data=None
self.all_activities=None
self.sport_dict=None
self.all_sleep=None
self.all_sleep_events=None
def pull_api(self, url,df=False):
auth_code=self.auth_code
headers={'authorization':auth_code}
pull=requests.get(url,headers=headers)
if pull.status_code==200 and len(pull.content)>1:
if df:
d=pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
main_df=pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
events_df=pd.json_normalize(sleep['events'])
events_df['id']=sleep_id
return events_df
def get_authorization(self,user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config=configparser.ConfigParser()
config.read(user_ini)
username=config['whoop']['username']
password=config['whoop']['password']
headers={
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False}
auth = requests.post("https://api-7.whoop.com/oauth/token", json=headers)
if auth.status_code==200:
content=auth.json()
user_id=content['user']['id']
token=content['access_token']
start_time=content['user']['profile']['createdAt']
self.whoop_id=user_id
self.auth_code='bearer ' + token
self.start_datetime=start_time
print("Authentication successful")
else:
print("Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date=parser.isoparse(self.start_datetime).replace(tzinfo=None)
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=self.current_datetime, dtstart=start_date)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals]
all_data=pd.DataFrame()
for dates in date_range:
cycle_url='https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(self.whoop_id,
dates[1],
dates[0])
data=self.pull_api(cycle_url,df=True)
all_data=pd.concat([all_data,data])
all_data.reset_index(drop=True,inplace=True)
## fixing the day column so it's not a list
all_data['days']=all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days":'day'},inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols=['qualityDuration','needBreakdown.baseline','needBreakdown.debt','needBreakdown.naps',
'needBreakdown.strain','needBreakdown.total']
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col]=all_data['sleep.' + sleep_col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
## Making nap variable
all_data['nap_duration']=all_data['sleep.naps'].apply(lambda x: x[0]['qualityDuration']/60000 if len(x)==1 else(
sum([y['qualityDuration'] for y in x if y['qualityDuration'] is not None])/60000 if len(x)>1 else 0))
all_data.drop(['sleep.naps'],axis=1,inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day','sleep.id'],inplace=True)
self.all_data=all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict=self.sport_dict
else:
sports=self.pull_api('https://api-7.whoop.com/sports')
sport_dict={sport['id']:sport['name'] for sport in sports}
            self.sport_dict=sport_dict  # cache the sport id -> name mapping
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull all data to process activities
data=self.get_keydata_all()
## now process activities data
act_data=pd.json_normalize(data[data['strain.workouts'].apply(len)>0]['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper','during.lower']]=act_data[['during.upper','during.lower']].apply(pd.to_datetime)
act_data['total_minutes']=act_data.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
for z in range(0,6):
act_data['zone{}_minutes'.format(z+1)]=act_data['zones'].apply(lambda x: x[z]/60000.)
act_data['sport_name']=act_data.sportId.apply(lambda x: sport_dict[x])
act_data['day']=act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones','during.bounds'],axis=1,inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities=act_data
return act_data
else:
print("Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if
|
pd.isna(x)
|
pandas.isna
|
from __future__ import print_function
# from __future__ import absolute_import
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.models import Sequential, Model, model_from_yaml
from keras.utils import plot_model
from keras.layers import merge, Dense, Dropout, Flatten, concatenate, add, Concatenate, subtract, average, dot
import numpy as np
import scipy
import sys
import os
import argparse
from random import randint, uniform
import time
import matplotlib.pyplot as plt
from keras.losses import sparse_categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as t
import random
import cv2
import keras.backend as K
import tensorflow as tf
import pandas as pd
from keras_preprocessing import image
import random as rn
np.random.seed(42)
rn.seed(12345)
tf.set_random_seed(1234)
# -----------------------------------------------------------------------------------------------
# import the essential functions required for computation
# sys.path.insert(0, os.path.expanduser('~//CNN_networks'))
# sys.export PYTHONPATH=/home/yaurehman2/PycharmProjects/face_anti_sp_newidea
print(sys.path)
from cnn_networks.VGG16_A_GAP_dual_inp import cnn_hybrid_color_single
from ess_func import read_pairs, sample_people, prewhiten, store_loss, hog_to_tensor, custom_loss
# -----------------------------------------------------------------------------------------------
def main(args):
# set the image parameters
img_rows = args.img_rows
img_cols = args.img_cols
img_dim_color = args.img_channels
# mix_prop = 1.0 # set the value of the mixing proportion
#############################################################################################################
################################## DEFINING MODEL ##########################################################
##############################################################################################################
model_alex = cnn_hybrid_color_single(img_rows, img_cols, img_dim_color) # load the model
# model_final = Model(model_alex.input, model_alex.output) # specify the input and output of the model
model_final = model_alex
print(model_final.summary()) # print the model summary
plot_model(model_final, to_file='./NIN_hybrid_bin_resnet_1x1-class', show_shapes=True) # save the model summary as a png file
lr = args.learning_rate # set the learning rate
# set the optimizer
optimizer = SGD(lr=lr, decay=1e-6, momentum=0.9)
# model compilation
model_final.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# print the value of the learning rate
print(K.get_value(optimizer.lr))
# --------------------------------------------------
#############################################################################################################
########################## GETTING TRAINING DATA AND TESTING DATA ##########################################
##############################################################################################################
# get the training data by calling the pairs function
# read the training data
train_pairs_r, training_data_r, training_label_r = read_pairs(args.tr_img_lab_r)
train_pairs_l, training_data_l, training_label_l = read_pairs(args.tr_img_lab_l)
assert len(training_data_r) == len(training_data_l)
# combine the left and right image in the training data to make a X x Y x 6 tensor
training_data = []
for i in range(len(training_data_r)):
# define the stereo pair
stereo_pair = [training_data_r[i], training_data_l[i]]
training_data.append(stereo_pair)
batch_num = 0
# initialize the live samples and fake samples
live_samples_ub = 0
attack_samples_ub = 0
live_samples = []
live_labels = []
attack_samples = []
attack_labels = []
# separate the live samples and fake samples to balance the both classes, i.e. live class and fake class
assert len(training_label_r) == len(training_label_l)
for i in range(len(training_data)):
if training_label_r[i] == 0:
live_samples.append(training_data[i])
live_labels.append(training_label_r[i])
live_samples_ub += 1
elif (training_label_r[i] == 1) | (training_label_r[i] == 2) | (training_label_r[i] == 3): # protocol_1
attack_samples.append(training_data[i])
attack_labels.append(training_label_r[i])
attack_samples_ub += 1
print("Live samples are %g ,\t attack samples are %g" % (live_samples_ub, attack_samples_ub))
# compute the difference; the live samples are always less than the fake samples in our case
diff = 0
if live_samples_ub < attack_samples_ub:
# compute the ratio
diff = np.int(attack_samples_ub / live_samples_ub)
print("The difference is :%g " % (diff))
else:
ValueError("The fake samples are less than then live samples")
# number of times the dataset has to be copied:
live_samples_b = live_samples
live_labels_b = live_labels
for i in range(diff - 1):
# print("length before balancing: %g" %len(live_samples_b))
sl_copy = live_samples.copy()
ll_copy = live_labels.copy()
live_samples_b = live_samples_b + sl_copy
live_labels_b = live_labels_b + ll_copy
# print("length after balancing: %g" % len(live_samples_b))
# balanced data
training_data_balanced = live_samples_b + attack_samples
training_label_balanced = live_labels_b + attack_labels
print("Balanced data samples: %g" % len(training_data_balanced))
# get the length of the training data
len_tr = len(training_data_balanced)
# get the number equal to the length of the training data
indices_tr = np.arange(len_tr)
np.random.shuffle(indices_tr)
# initialize the image counter
images_read = 0
train_img_data_r = []
train_img_data_l = []
for i in indices_tr:
if training_label_balanced[i] > 0:
training_label_balanced[i] = 1
train_img_data_r.append([training_data_balanced[i][0], training_label_balanced[i]]) # read the right image
train_img_data_l.append([training_data_balanced[i][1], training_label_balanced[i]]) # read the left image
# print(training_data_balanced[i][1])
# cv2.imshow('img1', cv2.imread(training_data_balanced[i][0]))
# cv2.waitKey()
# cv2.imshow('img2', cv2.imread(training_data_balanced[i][1]))
# cv2.waitKey()
images_read += 1
sys.stdout.write('train images read = {0}\r'.format(images_read))
sys.stdout.flush()
############################################################################################################
# read the test data
test_pairs, test_data_r, test_labels_r = read_pairs(args.tst_img_lab_r)
test_pairs, test_data_l, test_labels_l = read_pairs(args.tst_img_lab_l)
assert len(test_data_r) == len(test_data_l)
# combine the left and right image in the training data to make a X x Y x 6 tensor
test_data = []
for i in range(len(test_data_r)):
# define the stereo pair
stereo_pair_t = [test_data_r[i], test_data_l[i]]
test_data.append(stereo_pair_t)
test_labels = test_labels_r
images_read = 0
# get the length of the training data
len_test = len(test_data)
# get the number equal to the length of the training data
indices_test = np.arange(len_test)
test_img_data_r = []
test_img_data_l = []
for i in indices_test:
if test_labels[i] > 0:
test_labels[i] = 1
test_img_data_r.append([test_data[i][0], test_labels[i]]) # read the right test image
test_img_data_l.append([test_data[i][1], test_labels[i]]) # red the left test image
images_read += 1
sys.stdout.write('test images read = {0}\r'.format(images_read))
sys.stdout.flush()
#####################################################################################################
# make all the data in panda data frame format
train_df_r =
|
pd.DataFrame(train_img_data_r)
|
pandas.DataFrame
|
# coding: utf-8
# ## Preparation, Imports and Function Declarations
# In[ ]:
get_ipython().run_line_magic('config', 'IPCompleter.greedy = True')
# In[ ]:
# Install GGPLOT
get_ipython().system('python -m pip install ggplot')
# In[ ]:
from pprint import pprint
import geopy.distance
import datetime
import pandas as pd
from ggplot import *
from sklearn.feature_extraction import DictVectorizer
def get_distance_km(lat1, lon1, lat2, lon2):
return geopy.distance.distance((lat1, lon1), (lat2, lon2)).km
import datetime
def transform_date(date):
dates = date.split('-')
datef = datetime.datetime(int(dates[0]),int(dates[1]),int(dates[2]))
return datef.year, datef.month, datef.day, datef.weekday()
def Holiday(month, day):
if month == 7 and day <= 10: return 'IDD'
if month == 12: return 'CRI'
if month in [3,4]: return 'SRB'
if month == 11 and day >=22 and day<=28: return 'THG'
if month == 1: return 'NYR'
return 'NOT'
def Season(month,day):
if(month in [9,10,11]) : return 'AUT'
if(month in [12,1,2]) : return 'WIN'
if(month in [3,4,5]) : return 'SPR'
if(month in [6,7,8]) : return 'SUM'
return 'NOT'
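# Illustrative checks (not part of the original notebook):
# >>> Holiday(12, 25)
# 'CRI'
# >>> Season(4, 10)
# 'SPR'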
def train(development):
df_train = pd.read_csv('dataset/train.csv')
y_train = df_train[['PAX']]
y_test = None
if(development==False):
df_test = pd.read_csv('dataset/test.csv')
else:
from sklearn.model_selection import train_test_split
df_train, df_test, y_train, y_test = train_test_split(df_train, y_train, test_size=0.2, random_state=42)
### Extract the date and add new features from date
# TRAIN SET
tfdates = df_train.apply(lambda row: transform_date(row['DateOfDeparture']), axis=1)
years = [t[0] for t in tfdates]
months = [t[1] for t in tfdates]
days = [t[2] for t in tfdates]
weekdays = [t[3] for t in tfdates]
df_train['Year'], df_train['Month'],df_train['Day'], df_train['WeekDay'] = years, months, days, weekdays
# TEST SET
tfdates = df_test.apply(lambda row: transform_date(row['DateOfDeparture']), axis=1)
years = [t[0] for t in tfdates]
months = [t[1] for t in tfdates]
days = [t[2] for t in tfdates]
weekdays = [t[3] for t in tfdates]
df_test['Year'], df_test['Month'],df_test['Day'], df_test['WeekDay'] = years, months, days, weekdays
### Extract the distance from coordinates, longtitude and latitude are inversed -- !!!Dataset's error!!!
# TRAIN SET
distances = df_train.apply(lambda row: round(get_distance_km(row['LongitudeDeparture'],row['LatitudeDeparture'],row['LongitudeArrival'],row['LatitudeArrival']),3), axis=1)
df_train['Distance'] = distances
# TEST SET
distances = df_test.apply(lambda row: round(get_distance_km(row['LongitudeDeparture'],row['LatitudeDeparture'],row['LongitudeArrival'],row['LatitudeArrival']),3), axis=1)
df_test['Distance'] = distances
### Set min and max weeks to departure
# TRAIN SET
mins = df_train.apply(lambda row: round(row['WeeksToDeparture']-row['std_wtd'],3), axis=1)
maxs = df_train.apply(lambda row: round(row['WeeksToDeparture']+row['std_wtd'],3), axis=1)
df_train['MinWTD'] = mins
df_train['MaxWTD'] = maxs
# TEST SET
mins = df_test.apply(lambda row: round(row['WeeksToDeparture']-row['std_wtd'],3), axis=1)
maxs = df_test.apply(lambda row: round(row['WeeksToDeparture']+row['std_wtd'],3), axis=1)
df_test['MinWTD'] = mins
df_test['MaxWTD'] = maxs
### Find holidays, seasons
# TRAIN SET
holis = df_train.apply(lambda row: Holiday(row['Month'],row['Day']), axis=1)
seas = df_train.apply(lambda row: Season(row['Month'],row['Day']), axis=1)
df_train['Holiday'] = holis
df_train['Season'] = seas
# TEST SET
holis = df_test.apply(lambda row: Holiday(row['Month'],row['Day']), axis=1)
seas = df_test.apply(lambda row: Season(row['Month'],row['Day']), axis=1)
df_test['Holiday'] = holis
df_test['Season'] = seas
torem = ['DateOfDeparture','CityDeparture','LongitudeDeparture','LatitudeDeparture','CityArrival','LongitudeArrival','LatitudeArrival','WeeksToDeparture','std_wtd','PAX','MinWTD','MaxWTD']
if(development==False):
df_train.drop(torem, axis=1, inplace=True)
torem.remove('PAX')
df_test.drop(torem, axis=1, inplace=True)
else:
df_train.drop(torem, axis=1, inplace=True)
df_test.drop(torem, axis=1, inplace=True)
df_train.reset_index(drop=True,inplace=True)
df_test.reset_index(drop=True,inplace=True)
print(df_train.head(),'\n'*5)
print(df_test.head())
return df_train, df_test, y_train, y_test
def runModel(df_train, df_test, y_train, showTsne):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# Departure and Arrival have the same values so we train only on Departure
le.fit(df_train['Departure'])
df_train['Departure'] = le.transform(df_train['Departure'])
df_train['Arrival'] = le.transform(df_train['Arrival'])
df_test['Departure'] = le.transform(df_test['Departure'])
df_test['Arrival'] = le.transform(df_test['Arrival'])
le.fit(df_train['Holiday'])
df_train['Holiday'] = le.transform(df_train['Holiday'])
le.fit(df_test['Holiday'])
df_test['Holiday'] = le.transform(df_test['Holiday'])
le.fit(df_train['Season'])
df_train['Season'] = le.transform(df_train['Season'])
le.fit(df_test['Season'])
df_test['Season'] = le.transform(df_test['Season'])
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
import numpy as np
import codecs
X_train = df_train
X_test = df_test
y_train = np.ravel(y_train)
### Scale the data
from sklearn.preprocessing import minmax_scale
X_train['Distance'] = minmax_scale(X_train['Distance'])
X_test['Distance'] = minmax_scale(X_test['Distance'])
# # One-Hot encoding
X_train_dep = pd.get_dummies(X_train['Departure'],prefix='dep')
X_train_arr = pd.get_dummies(X_train['Arrival'],prefix='arr')
cols = X_train.columns[[0,1,2,3,4,5,7,8]]
X_train[cols] = minmax_scale(X_train[cols])
X_train_extra = pd.concat([X_train['Departure'], X_train['Arrival'], X_train['Year'], X_train['Month'], X_train['Day'], X_train['WeekDay'], X_train['Holiday'], X_train['Season'], X_train['Distance']],axis=1)
from sklearn.decomposition import PCA
pca = PCA(n_components=1)
pca.fit(X_train_extra)
X_train_extra = pca.transform(X_train_extra)
X_train_extra = pd.DataFrame(X_train_extra)
X_train_extra.reset_index(drop=True,inplace=True)
X_train = pd.concat([X_train,X_train_extra,X_train_dep,X_train_arr],axis=1,ignore_index=True)
X_train.drop([0,1],axis=1, inplace=True)
idx_scale = [9]
for i in idx_scale:
X_train[i] = minmax_scale(X_train[i])
X_test_dep = pd.get_dummies(X_test['Departure'],prefix='dep')
X_test_arr = pd.get_dummies(X_test['Arrival'],prefix='arr')
cols = X_test.columns[[0,1,2,3,4,5,7,8]]
X_test[cols] = minmax_scale(X_test[cols])
X_test_extra = pd.concat([X_test['Departure'], X_test['Arrival'], X_test['Year'], X_test['Month'], X_test['Day'], X_test['WeekDay'], X_test['Holiday'], X_test['Season'], X_test['Distance']],axis=1)
X_test_extra = pca.transform(X_test_extra)
X_test_extra = pd.DataFrame(X_test_extra)
X_test_extra.reset_index(drop=True,inplace=True)
X_test = pd.concat([X_test,X_test_extra,X_test_dep,X_test_arr],axis=1, ignore_index=True)
X_test.drop([0,1],axis=1, inplace=True)
idx_scale = [9]
for i in idx_scale:
X_test[i] = minmax_scale(X_test[i])
## Print the data
print(X_train.head())
## TSNE
if(showTsne):
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2,n_iter=250)
tsne_res = tsne.fit_transform(X_train)
df_tnse = pd.DataFrame(tsne_res)
df_pax =
|
pd.DataFrame(y_train)
|
pandas.DataFrame
|
"""
Scrape Los Angeles Metro ridership data
"""
import datetime
import os
from urllib.parse import quote_plus
import bs4
import pandas as pd
import requests
import sqlalchemy
# The URL for the ridership form
RIDERSHIP_URL = "http://isotp.metro.net/MetroRidership/IndexSys.aspx"
# Parameters needed to validate the request
ASPX_PARAMETERS = ["__VIEWSTATE", "__EVENTVALIDATION"]
# The S3 bucket into which to load data.
S3_BUCKET = "s3://tmf-ita-data"
def get_form_data():
"""
Make an inital fetch of the form so we can scrape the options
as well as the parameters needed to validate our requests.
"""
# Fetch the page and parse it
r = requests.get(RIDERSHIP_URL)
r.raise_for_status()
soup = bs4.BeautifulSoup(r.text, features="html.parser")
# Get validation info
aspx_data = {
param: soup.find(attrs={"id": param}).attrs.get("value", "")
for param in ASPX_PARAMETERS
}
# Get all of the metro lines
line_options = soup.find(attrs={"id": "ContentPlaceHolder1_lbLines"}).select(
"option"
)
lines = [
option.attrs["value"]
for option in line_options
if option.attrs["value"] != "All"
]
# Get the available years
year_options = soup.find(attrs={"id": "ContentPlaceHolder1_ddlYear"}).select(
"option"
)
years = [option.attrs["value"] for option in year_options]
return lines, years, aspx_data
def submit_form(year, period, line, aspx_data):
"""
Submit a form to the Metro ridership site requesting data for a line.
Parameters
----------
year: int
The year for which to fetch the data.
period: str or int
The time period in which to fetch the data. Typically you will want
an integer month, though other values like quarters ("Q1") may work.
line: str or int
The Metro line number
aspx_data: dict
The metadata needed for forming a correct form submission.
Returns
-------
An HTML string of the resposnse.
"""
form_data = {
"ctl00$ContentPlaceHolder1$rbFYCY": "CY",
"ctl00$ContentPlaceHolder1$ddlYear": str(year),
"ctl00$ContentPlaceHolder1$ddlPeriod": str(period),
"ctl00$ContentPlaceHolder1$btnSubmit": "Submit",
"ctl00$ContentPlaceHolder1$lbLines": str(line),
**aspx_data,
}
r = requests.post(RIDERSHIP_URL, data=form_data)
r.raise_for_status()
if r.text.find("Data not available yet") != -1:
raise ValueError(f"Data not available for {year}, {period}, {line}")
return r.text
def parse_response(html):
"""
Parse an HTML response from the ridership form into a dataframe.
Parameters
----------
html: str
The HTML webpage from the ridership site.
Returns
-------
A dataframe from the parsed HTML table.
"""
tables = pd.read_html(
html,
flavor="bs4",
attrs={"id": "ContentPlaceHolder1_ASPxRoundPanel2_gvRidership"},
)
if len(tables) == 0:
raise ValueError("No table found")
df = tables[0]
# Filter out the "Total" row
df = df[df["Day Type"] != "Total"]
return df
def get_ridership_data(year, period, line, aspx_data):
"""
Get ridership for a given year, time period, and line.
Parameters
----------
year: int
The year for which to fetch the data.
period: str or int
The time period in which to fetch the data. Typically you will want
an integer month, though other values like quarters ("Q1") may work.
line: str or int
The Metro line number
aspx_data: dict
The metadata needed for forming a correct form submission.
Returns
-------
A dataframe with ridership data for the line/period/year.
"""
html = submit_form(year, period, line, aspx_data)
df = parse_response(html)
df = df.assign(year=year, month=period, line=line)
return df
def get_all_ridership_data(verbosity=0):
"""
Fetch all ridership data from the web form.
"""
lines, years, aspx_data = get_form_data()
months = [str(i) for i in range(1, 13)]
ridership = pd.DataFrame()
# Get the current timestamp so we don't try to fetch from the future.
now =
|
pd.Timestamp.now()
|
pandas.Timestamp.now
|
# -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
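# Illustration of the coercions exercised below (not itself a test): inserting the
# float 1.1 into an Int64Index upcasts the result to float64, e.g.
# pd.Int64Index([1, 2]).insert(1, 1.1) gives Float64Index([1.0, 1.1, 2.0]).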
class TestIndexCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def test_setitem_index_numeric_coercion_int(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.index.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[5] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 5]))
self.assertEqual(temp.index.dtype, np.int64)
# int + float -> float
temp = s.copy()
temp[1.1] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 1.1]))
self.assertEqual(temp.index.dtype, np.float64)
def test_setitem_index_numeric_coercion_float(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(s.index.dtype, np.float64)
# float + int -> int
temp = s.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
temp = s.copy()
temp[5.1] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
tm.assert_series_equal(temp, exp)
self.assertEqual(temp.index.dtype, np.float64)
def test_insert_numeric_coercion_int(self):
idx = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.int64)
# int + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
self.assertEqual(res.dtype, np.float64)
# int + bool -> int
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
def test_insert_numeric_coercion_float(self):
idx = pd.Float64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.float64)
# float + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1., 1., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1., 1.1, 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + bool -> float
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1., 0., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
class TestSeriesCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
def test_setitem_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 3, 4]))
self.assertEqual(temp.dtype, np.complex128)
# int + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
def test_setitem_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
# float + int -> float
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + float -> float
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1.1, 1.1, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
# float + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1.1, 1 + 1j, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.complex128)
# float + bool -> float
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
self.assertEqual(temp.dtype, np.float64)
def test_setitem_numeric_coercion_complex(self):
s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(s.dtype, np.complex128)
# complex + int -> complex
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + float -> complex
temp = s.copy()
temp[1] = 1.1
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp,
pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
# complex + bool -> complex
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
self.assertEqual(temp.dtype, np.complex128)
def test_setitem_numeric_coercion_bool(self):
s = pd.Series([True, False, True, False])
self.assertEqual(s.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 1
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# TODO_GH12747 The result must be int
temp = s.copy()
temp[1] = 3 # greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
temp = s.copy()
temp[1] = 1 + 1j
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
# bool + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
self.assertEqual(temp.dtype, np.bool)
def test_where_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
cond = pd.Series([True, False, True, False])
# int + int -> int
res = s.where(cond, 1)
tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
self.assertEqual(res.dtype, np.int64)
res = s.where(cond, pd.Series([5, 6, 7, 8]))
tm.assert_series_equal(res, pd.Series([1, 6, 3, 8]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = s.where(cond, 1.1)
tm.assert_series_equal(res, pd.Series([1, 1.1, 3, 1.1]))
self.assertEqual(res.dtype, np.float64)
res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
tm.assert_series_equal(res, pd.Series([1, 6.6, 3, 8.8]))
self.assertEqual(res.dtype, np.float64)
# int + complex -> complex
res = s.where(cond, 1 + 1j)
tm.assert_series_equal(res, pd.Series([1, 1 + 1j, 3, 1 + 1j]))
self.assertEqual(res.dtype, np.complex128)
res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
tm.assert_series_equal(res, pd.Series([1, 6 + 6j, 3, 8 + 8j]))
self.assertEqual(res.dtype, np.complex128)
# int + bool -> int
res = s.where(cond, True)
tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
self.assertEqual(res.dtype, np.int64)
res = s.where(cond, pd.Series([True, False, True, True]))
tm.assert_series_equal(res, pd.Series([1, 0, 3, 1]))
self.assertEqual(res.dtype, np.int64)
def test_where_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
cond =
|
pd.Series([True, False, True, False])
|
pandas.Series
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import collections
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from pandas.compat import StringIO, u
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0,
header=None, parse_dates=True)
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params)
if header is None:
out.name = out.index.name = None
return out
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean() as path:
self.ts.to_csv(path)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = self.read_csv(path)
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = self.read_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
# see gh-10483
self.ts.to_csv(path, header=True)
ts_h = self.read_csv(path, header=0)
assert ts_h.name == "ts"
self.series.to_csv(path)
series = self.read_csv(path)
assert_series_equal(self.series, series, check_names=False)
assert series.name is None
assert series.index.name is None
self.series.to_csv(path, header=True)
series_h = self.read_csv(path, header=0)
assert series_h.name == "series"
outfile = open(path, "w")
outfile.write("1998-01-01|1.0\n1999-01-01|2.0")
outfile.close()
series = self.read_csv(path, sep="|")
check_series = Series({datetime(1998, 1, 1): 1.0,
datetime(1999, 1, 1): 2.0})
|
assert_series_equal(check_series, series)
|
pandas.util.testing.assert_series_equal
|
"""
handle preprocessing and loading of data.
"""
import html
import os.path
import pandas as pd
import re
from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
class LoadData:
@classmethod
def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']):
"""
preprocess the data in file location and saves it as a csv file (appending
'_preprocessed' before '.csv). The preprocessing us in following ways:
1) extract message and datetime columns.
2) sort according to datetime in descending order (newest first)
3) remove links, @ and $ references, extra whitespaces, extra '.', digits, slashes,
hyphons
4) decode html entities
5) convert everything to lower case
"""
if 'datetime' in columns:
dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True)
dataFrame.sort_values(by='datetime', ascending=False)
else:
dataFrame = pd.read_csv(file_location, usecols=columns)
dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\.|https?://).*?(\s|$)|@.*?(\s|$)|\$.*?(\s|$)|\d|\%|\\|/|-|_', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\.+', '. ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\,+', ', ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\?+', '? ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\s+', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower())
dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False)
@classmethod
def labelled_data_lexicon_analysis(cls):
"""
extract keywords from labelled stocktwits data for improved accuracy in scoring
for each labelled message do
1) tokenize the message
2) perform POS tagging
3) if a sense is present in wordnet then, lemmatize the word and remove stop words else ignore the word
remove intersections from the two lists before saving
"""
dataFrame = LoadData.get_labelled_data()
bullish_keywords = set()
bearish_keywords = set()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
for index, row in dataFrame.iterrows():
tokens = word_tokenize(row['message'])
pos = pos_tag(tokens)
selected_tags = set()
for i in range(len(pos)):
if len(wordnet.synsets(pos[i][0])):
if pos[i][1].startswith('J'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a'))
elif pos[i][1].startswith('V'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v'))
elif pos[i][1].startswith('N'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n'))
elif pos[i][1].startswith('R'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r'))
selected_tags -= stop_words
if row['sentiment'] == 'Bullish':
bullish_keywords = bullish_keywords.union(selected_tags)
elif row['sentiment'] == 'Bearish':
bearish_keywords = bearish_keywords.union(selected_tags)
updated_bullish_keywords = bullish_keywords - bearish_keywords
updated_bearish_keywords = bearish_keywords - bullish_keywords
with open('data-extractor/lexicon_bullish_words.txt', 'a') as file:
for word in updated_bullish_keywords:
file.write(word+"\n")
with open('data-extractor/lexicon_bearish_words.txt', 'a') as file:
for word in updated_bearish_keywords:
file.write(word+"\n")
@classmethod
def get_stocktwits_data(cls, symbol):
"""
get_data loads the preprocessed data of 'symbol' from data-extractor
and returns a pandas dataframe with columns [message(object), datetime(datetime64[ns])].
"""
file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv')
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_price_data(cls, symbol):
"""
loads the price data of 'symbol' from data-extractor
and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)].
"""
file_location = 'data-extractor/stock_prices_'+symbol+'.csv'
dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True)
return dataFrame
@classmethod
def get_labelled_data(cls, type='complete'):
"""
get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor
and returns a pandas dataframe with columns [sentiment(object), message(object)].
"""
if type == 'complete':
file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message'])
elif type == 'training':
file_location = 'data-extractor/labelled_data_training_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.get_training_data()
elif type == 'test':
file_location = 'data-extractor/labelled_data_test_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message'])
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_custom_lexicon(cls):
"""
get custom lexicon of bearish and bullish words respectively
"""
file_location1 = 'data-extractor/lexicon_bearish_words.txt'
file_location2 = 'data-extractor/lexicon_bullish_words.txt'
if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:
LoadData.labelled_data_lexicon_analysis()
dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])
dataFrameBullish =
|
pd.read_csv(file_location2, header=None, names=['word'])
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""Main formatting source code to format modelling results for plotting.
This code was written to process PLEXOS HDF5 outputs to get them ready for plotting.
Once the data is processed it is outputted as an intermediary HDF5 file format so that
it can be read into the marmot_plot_main.py file
@author: <NAME>
"""
# ===============================================================================
# Import Python Libraries
# ===============================================================================
import os
import sys
import pathlib
FILE_DIR = pathlib.Path(__file__).parent.absolute() # Location of this module
if __name__ == '__main__': # Add Marmot directory to sys path if running from __main__
if os.path.dirname(os.path.dirname(__file__)) not in sys.path:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
os.chdir(pathlib.Path(__file__).parent.absolute().parent.absolute())
import time
import re
import logging
import logging.config
import pandas as pd
import h5py
import yaml
from typing import Union
try:
from marmot.meta_data import MetaData
except ModuleNotFoundError:
print("Attempted import of Marmot as a module from a Git directory. ", end='')
print("Import of Marmot will not function in this way. ", end='')
print("To import Marmot as a module use the preferred method of pip installing Marmot, ", end='')
print("or add the Marmot directory to the system path, see ReadME for details.\n")
print("System will now exit")
sys.exit()
import marmot.config.mconfig as mconfig
# Import as Submodule
try:
from h5plexos.query import PLEXOSSolution
except ModuleNotFoundError:
from marmot.h5plexos.h5plexos.query import PLEXOSSolution
# A bug in pandas requires this to be included,
# otherwise df.to_string truncates long strings. Fix available in Pandas 1.0
# but leaving here in case user version not up to date
pd.set_option("display.max_colwidth", 1000)
# Conversion units dict, key values is a tuple of new unit name and conversion multiplier
UNITS_CONVERSION = {
'kW': ('MW', 1e-3),
'MW': ('MW', 1),
'GW': ('MW', 1e3),
'TW': ('MW', 1e6),
'kWh': ('MWh', 1e-3),
'MWh': ('MWh', 1),
'GWh': ('MWh', 1e3),
'TWh': ('MWh', 1e6),
'lb': ('kg', 0.453592),
'ton': ('kg', 907.18474),
'kg': ('kg', 1),
'tonne': ('kg', 1000),
'$': ('$', 1),
'$000': ('$', 1000),
'h': ('h', 1),
'MMBTU': ('MMBTU', 1),
'GBTU': ('MMBTU', 1000),
'GJ"': ('MMBTU', 0.947817),
'TJ': ('MMBTU', 947.817120),
'$/MW': ('$/MW', 1),
'lb/MWh' : ('kg/MWh', 0.453592),
'Kg/MWh': ('Kg/MWh', 1)
}
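# Example of how the table above is applied (assumption about usage elsewhere in
# Marmot): a property reported in 'GWh' is renamed to 'MWh' and its values are
# multiplied by 1e3 before further processing.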
class SetupLogger():
"""Sets up the python logger.
This class handles the following.
1. Configures logger from marmot_logging_config.yml file.
2. Handles rollover of log file on each instantiation.
3. Sets log_directory.
4. Append optional suffix to the end of the log file name
Optional suffix is useful when running multiple processes in parallel to
allow logging to separate files.
"""
def __init__(self, log_directory: str = 'logs',
log_suffix: str = None):
"""
Args:
log_directory (str, optional): log directory to save logs.
Defaults to 'logs'.
log_suffix (str, optional): Optional suffix to add to end of log file.
Defaults to None.
"""
if log_suffix is None:
self.log_suffix = ''
else:
self.log_suffix = f'_{log_suffix}'
current_dir = os.getcwd()
os.chdir(FILE_DIR)
try:
os.makedirs(log_directory)
except FileExistsError:
# log directory already exists
pass
with open('config/marmot_logging_config.yml', 'rt') as f:
conf = yaml.safe_load(f.read())
conf['handlers']['warning_handler']['filename'] = \
(conf['handlers']['warning_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
conf['handlers']['info_handler']['filename'] = \
(conf['handlers']['info_handler']['filename']
.format(log_directory, 'formatter', self.log_suffix))
logging.config.dictConfig(conf)
self.logger = logging.getLogger('marmot_format')
# Creates a new log file for next run
self.logger.handlers[1].doRollover()
self.logger.handlers[2].doRollover()
os.chdir(current_dir)
class Process(SetupLogger):
"""Process PLEXOS class specific data from h5plexos database.
All methods are PLEXOS Class specific e.g generator, region, zone, line etc.
"""
def __init__(self, df: pd.DataFrame, metadata: MetaData,
model: str, Region_Mapping: pd.DataFrame,
emit_names: pd.DataFrame, logger: logging.Logger):
"""
Args:
df (pd.DataFrame): Unprocessed h5plexos dataframe containing
class and property specifc data.
metadata (MetaData): Instantiation of MetaData for specific
h5plexos file.
model (str): Name of specific PLEXOS model partition
Region_Mapping (pd.DataFrame): DataFrame to map custom
regions/zones to create custom aggregations.
emit_names (pd.DataFrame): DataFrame with 2 columns to rename
emission names.
logger (logging.Logger): logger object from SetupLogger.
"""
# certain methods require information from metadata. metadata is now
# passed in as an instance of MetaData class for the appropriate model
self.df = df
self.metadata = metadata
self.model = model
self.Region_Mapping = Region_Mapping
self.emit_names = emit_names
self.logger = logger
if not self.emit_names.empty:
self.emit_names_dict = (self.emit_names[['Original', 'New']]
.set_index("Original").to_dict()["New"])
def df_process_generator(self) -> pd.DataFrame:
"""Format PLEXOS Generator Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['tech', 'gen_name'], level=['category', 'name'], inplace=True)
if self.metadata.region_generator_category(self.model).empty is False:
region_gen_idx = pd.CategoricalIndex(self.metadata.region_generator_category(self.model)
.index.get_level_values(0))
region_gen_idx = region_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_region = pd.MultiIndex(levels=df.index.levels + [region_gen_idx.categories],
codes=df.index.codes + [region_gen_idx.codes],
names=df.index.names + region_gen_idx.names)
else:
idx_region = df.index
if self.metadata.zone_generator_category(self.model).empty is False:
zone_gen_idx = pd.CategoricalIndex(self.metadata.zone_generator_category(self.model)
.index.get_level_values(0))
zone_gen_idx = zone_gen_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_zone = pd.MultiIndex(levels=idx_region.levels + [zone_gen_idx.categories],
codes=idx_region.codes + [zone_gen_idx.codes],
names=idx_region.names + zone_gen_idx.names)
else:
idx_zone = idx_region
if not self.Region_Mapping.empty:
region_gen_mapping_idx = pd.MultiIndex.from_frame(self.metadata.region_generator_category(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.sort_values(by=['tech', 'gen_name'])
.drop(['region', 'tech', 'gen_name'], axis=1)
)
region_gen_mapping_idx = region_gen_mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx_map = pd.MultiIndex(levels=idx_zone.levels + region_gen_mapping_idx.levels,
codes=idx_zone.codes + region_gen_mapping_idx.codes,
names=idx_zone.names + region_gen_mapping_idx.names)
else:
idx_map = idx_zone
df = pd.DataFrame(data=df.values.reshape(-1), index=idx_map)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_region(self) -> pd.DataFrame:
"""Format PLEXOS Region Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('region', level='name', inplace=True)
# checks if Region_Mapping contains data to merge, skips if empty
if not self.Region_Mapping.empty:
mapping_idx = pd.MultiIndex.from_frame(self.metadata.regions(self.model)
.merge(self.Region_Mapping,
how="left",
on='region')
.drop(['region', 'category'], axis=1)
)
mapping_idx = mapping_idx.repeat(len(df.index.get_level_values('timestamp').unique()))
idx = pd.MultiIndex(levels=df.index.levels + mapping_idx.levels,
codes=df.index.codes + mapping_idx.codes,
names=df.index.names + mapping_idx.names)
else:
idx = df.index
df = pd.DataFrame(data=df.values.reshape(-1), index=idx)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # Move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_zone(self) -> pd.DataFrame:
"""Format PLEXOS Zone Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('zone', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_line(self) -> pd.DataFrame:
"""Format PLEXOS Line Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('line_name', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_interface(self) -> pd.DataFrame:
"""Format PLEXOS PLEXOS Interface Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['interface_name', 'interface_category'],
level=['name', 'category'], inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_reserve(self) -> pd.DataFrame:
"""Format PLEXOS Reserve Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['parent', 'Type'], level=['name', 'category'], inplace=True)
df = df.reset_index() # unzip the levels in index
if self.metadata.reserves_regions(self.model).empty is False:
# Merges in regions where reserves are located
df = df.merge(self.metadata.reserves_regions(self.model),
how='left', on='parent')
if self.metadata.reserves_zones(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.reserves_zones(self.model),
how='left', on='parent')
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
# move timestamp to start of df
df_col.insert(0, df_col.pop(df_col.index("timestamp")))
df.set_index(df_col, inplace=True)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_reserves_generators(self) -> pd.DataFrame:
"""Format PLEXOS Reserve_Generators Relational Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['gen_name'], level=['child'], inplace=True)
df = df.reset_index() # unzip the levels in index
df = df.merge(self.metadata.generator_category(self.model),
how='left', on='gen_name')
# merging in generator region/zones first prevents double
# counting in cases where multiple model regions are within a reserve region
if self.metadata.region_generators(self.model).empty is False:
df = df.merge(self.metadata.region_generators(self.model),
how='left', on='gen_name')
if self.metadata.zone_generators(self.model).empty is False:
df = df.merge(self.metadata.zone_generators(self.model),
how='left', on='gen_name')
# now merge in reserve regions/zones
if self.metadata.reserves_regions(self.model).empty is False:
# Merges in regions where reserves are located
df = df.merge(self.metadata.reserves_regions(self.model),
how='left', on=['parent', 'region'])
if self.metadata.reserves_zones(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.reserves_zones(self.model),
how='left', on=['parent', 'zone'])
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df.set_index(df_col, inplace=True)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_fuel(self) -> pd.DataFrame:
"""Format PLEXOS Fuel Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property", "category"])
df.index.rename('fuel_type', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_constraint(self) -> pd.DataFrame:
"""Format PLEXOS Constraint Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['constraint_category', 'constraint'],
level=['category', 'name'], inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_emission(self) -> pd.DataFrame:
"""Format PLEXOS Emission Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename('emission_type', level='name', inplace=True)
df = pd.DataFrame(data=df.values.reshape(-1), index=df.index)
df_col = list(df.index.names) # Gets names of all columns in df and places in list
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df = df.reorder_levels(df_col, axis=0)
df[0] = pd.to_numeric(df[0], downcast='float')
return df
def df_process_emissions_generators(self) -> pd.DataFrame:
"""Format PLEXOS Emissions_Generators Relational Class data.
Returns:
pd.DataFrame: Processed output, single value column with multiindex.
"""
df = self.df.droplevel(level=["band", "property"])
df.index.rename(['gen_name'], level=['child'], inplace=True)
df.index.rename(['pollutant'], level=['parent'], inplace=True)
df = df.reset_index() # unzip the levels in index
# merge in tech information
df = df.merge(self.metadata.generator_category(self.model),
how='left', on='gen_name')
# merge in region and zone information
if self.metadata.region_generator_category(self.model).empty is False:
# merge in region information
df = df.merge(self.metadata.region_generator_category(self.model).reset_index(),
how='left', on=['gen_name', 'tech'])
if self.metadata.zone_generator_category(self.model).empty is False:
# Merges in zones where reserves are located
df = df.merge(self.metadata.zone_generator_category(self.model).reset_index(),
how='left', on=['gen_name', 'tech'])
if not self.Region_Mapping.empty:
df = df.merge(self.Region_Mapping, how="left", on="region")
if not self.emit_names.empty:
# reclassify emissions as specified by user in mapping
df['pollutant'] = pd.Categorical(df['pollutant'].map(lambda x: self.emit_names_dict.get(x, x)))
# remove categoricals (otherwise h5 save will fail)
df = df.astype({'tech': 'object', 'pollutant': 'object'})
# Checks if all emissions categories have been identified and matched.
# If not, lists categories that need a match
if not self.emit_names.empty:
if self.emit_names_dict != {} and (set(df['pollutant'].unique()).issubset(self.emit_names["New"].unique())) is False:
missing_emit_cat = list((set(df['pollutant'].unique())) - (set(self.emit_names["New"].unique())))
self.logger.warning(f"The following emission objects do not have a correct category mapping: {missing_emit_cat}\n")
df_col = list(df.columns) # Gets names of all columns in df and places in list
df_col.remove(0)
df_col.insert(0, df_col.pop(df_col.index("timestamp"))) # move timestamp to start of df
df.set_index(df_col, inplace=True)
# downcast values to save on memory
df[0] =
|
pd.to_numeric(df[0].values, downcast='float')
|
pandas.to_numeric
|
# %%
#imports
import pandas as pd
import numpy as np
# Multiple types of models from sklearn
from sklearn import svm
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import NuSVC
import hvplot.pandas
from finta import TA
from pathlib import Path
import datetime as dt
# %%
# import functions
from utils.AlpacaFunctions import get_historical_dataframe, get_crypto_bars, get_news
from utils.data_process import return_rolling_averages
from utils.data_process import return_crossovers
from utils.data_process import return_weighted_crossovers
#from sentiment_functions import get_sentiments
def getStockModel(ticker, numDays, initial_capital):
today = dt.date.today()
two_years_ago = (today - dt.timedelta(days=numDays))
yesterday = (today - dt.timedelta(days=1)).isoformat()
start = two_years_ago
end= yesterday
symbol = ticker
timeframe='1Day'
limit = 5000
stock_df = pd.DataFrame()
stock_df = get_historical_dataframe(symbol=symbol, start=start, end=end, timeframe=timeframe)
    # Isolating the close values from the stock dataframe
close_df = pd.DataFrame(stock_df["close"])
# %%
# Beginning our data processing for the stock close data with rolling averages, crossovers, etc.
return_rolling_averages(close_df)
# %%
cross_df = return_crossovers(close_df)
# %%
cross_signals = cross_df.sum(axis=1)
# %%
pct_change_df = close_df.pct_change()
# %%
cross_weighted_df = return_weighted_crossovers(close_df, pct_change_df)
# %%
cross_signals_weighted = pd.DataFrame(cross_weighted_df.sum(axis=1))
# %%
# Using the finta library, we will implement more metrics for evaluation to be added to our signals dataframe
finta_df = pd.DataFrame()
finta_df['vama'] = TA.VAMA(stock_df)
finta_df['rsi'] = TA.RSI(stock_df)
finta_df['ao'] = TA.AO(stock_df)
finta_df['ema'] = TA.EMA(stock_df)
finta_df['evwma'] = TA.EVWMA(stock_df)
finta_df['msd'] = TA.MSD(stock_df)
finta_df['efi'] = TA.EFI(stock_df)
finta_df['stochrsi'] = TA.STOCHRSI(stock_df)
finta_df['tp'] = TA.TP(stock_df)
finta_df['frama'] = TA.FRAMA(stock_df)
finta_df['kama'] = TA.KAMA(stock_df)
finta_df['hma'] = TA.HMA(stock_df)
finta_df['obv'] = TA.OBV(stock_df)
finta_df['cfi'] = TA.CFI(stock_df)
finta_df['sma'] = TA.SMA(stock_df)
finta_df['ssma'] = TA.SSMA(stock_df)
finta_df['dema'] = TA.DEMA(stock_df)
finta_df['tema'] = TA.TEMA(stock_df)
finta_df['trima'] = TA.TRIMA(stock_df)
finta_df['trix'] = TA.TRIX(stock_df)
finta_df['smm'] = TA.SMM(stock_df)
finta_df['zlema'] = TA.ZLEMA(stock_df)
finta_df['smma'] = TA.SMMA(stock_df)
finta_df['mom'] = TA.MOM(stock_df)
finta_df['uo'] = TA.UO(stock_df)
finta_df['vzo'] = TA.VZO(stock_df)
finta_df['pzo'] = TA.PZO(stock_df)
finta_df.fillna(0, inplace=True)
# %%
pct_change_df.rename(columns={'close': 'pct'}, inplace=True)
#stock_df.drop(columns='symbol', inplace=True)
# %%
# Concatenating dataframe with our cumulative signals
signals_input_df = pd.DataFrame()
signals_input_df = pd.concat([stock_df, pct_change_df, cross_df, pct_change_df,
cross_signals, cross_signals_weighted, cross_weighted_df, finta_df], axis=1).dropna()
# %%
#Assigning our signals dataframe to X for train/test split
X = signals_input_df.dropna()
# %%
# Shifting our close df and comparing to original close df, we generate signals for whether or not the stock
# will go up for the following day. We then convert the returned 0s to -1s for more robust predictions.
y_signal = pd.DataFrame(
((close_df["close"] > close_df["close"].shift()).shift(-1))*1, index=close_df.index)
y_signal['close'] = np.where(y_signal['close'] == 0, -1, 1)
# %%
# Assigning the y_signals to our X index values
y_signal = y_signal.loc[X.index]
# %%
# Displaying our y_signal data and stock close values to ensure the signals are correct.
#display(y_signal, close_df['close'])
# %%
# Assigning the y_signal['close'] data to y to create an array for train/test split
y = np.array(y_signal['close'])
# %%
# Establishing train/test split
train_num = int(X.shape[0] * 0.9)
test_num = int(X.shape[0]-train_num)
X_train = X[:train_num]
X_test = X[-test_num:]
y_train = y[:train_num]
y_test = y[-test_num:]
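    # Note: this is a chronological split (first 90% of rows for training, the most
    # recent 10% for testing) rather than a shuffled split, which avoids leaking
    # future observations into the training set.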
# %%
# Scaling data for model processing
scaler = StandardScaler()
# %%
scaler.fit(X_train)
# %%
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# %%
# Our first model is the 'svm' model from sklearn
svm_model = svm.SVC()
svm_model = svm_model.fit(X_train_scaled, y_train)
svm_predictions = svm_model.predict(X_test_scaled)
# %%
# Checking the accuracy of the 'svm' model
svm_training_report = classification_report(y_test, svm_predictions, output_dict=True)
svm_training_report = pd.DataFrame(svm_training_report)
svm_training_report = pd.DataFrame(svm_training_report.iloc[2][0:2])
svm_minus = svm_training_report.iloc[0, 0]
svm_plus = svm_training_report.iloc[1, 0]
svm_training_report.columns = ['svm']
# %%
knc = KNeighborsClassifier()
knc_model = knc.fit(X_train_scaled, y_train)
knc_predictions = knc_model.predict(X_test_scaled)
knc_training_report = classification_report(y_test, knc_predictions, output_dict=True)
knc_training_report = pd.DataFrame(knc_training_report)
knc_training_report = pd.DataFrame(knc_training_report.iloc[2][0:2])
knc_minus = knc_training_report.iloc[0, 0]
knc_plus = knc_training_report.iloc[1, 0]
knc_training_report.columns = ['knc']
# %%
nvc = NuSVC()
nvc_model = nvc.fit(X_train_scaled, y_train)
nvc_predictions = nvc_model.predict(X_test_scaled)
nvc_training_report = classification_report(y_test, nvc_predictions, output_dict=True)
nvc_training_report = pd.DataFrame(nvc_training_report)
nvc_training_report = pd.DataFrame(nvc_training_report.iloc[2][0:2])
nvc_minus = nvc_training_report.iloc[0, 0]
nvc_plus = nvc_training_report.iloc[1, 0]
nvc_training_report.columns = ['nvc']
# %%
    # Implementing logistic regression model from sklearn
lr_model = LogisticRegression(max_iter=300, verbose=True)
lr_model.fit(X_train_scaled, y_train)
lr_predictions = lr_model.predict(X_test_scaled)
lr_training_report = classification_report(y_test, lr_predictions, output_dict=True)
lr_training_report = pd.DataFrame(lr_training_report)
lr_training_report = pd.DataFrame(lr_training_report.iloc[2][0:2])
lr_minus = lr_training_report.iloc[0, 0]
lr_plus = lr_training_report.iloc[1, 0]
lr_training_report.columns = ['lr']
# %%
rfc = RandomForestClassifier()
rfc.fit(X_train_scaled, y_train)
rfc_predictions = rfc.predict(X_test_scaled)
rfc_training_report = classification_report(y_test, rfc_predictions, output_dict=True)
rfc_training_report = pd.DataFrame(rfc_training_report)
rfc_training_report = pd.DataFrame(rfc_training_report.iloc[2][0:2])
rfc_minus = rfc_training_report.iloc[0, 0]
rfc_plus = rfc_training_report.iloc[1, 0]
rfc_training_report.columns = ['rfc']
# %%
# Gradient boosting classifier from sklearn
model = GradientBoostingClassifier(random_state=0)
model.fit(X_train_scaled, y_train)
gbc_predictions = model.predict(X_test_scaled)
gbc_training_report = classification_report(y_test, gbc_predictions, output_dict=True)
gbc_training_report = pd.DataFrame(gbc_training_report)
gbc_training_report = pd.DataFrame(gbc_training_report.iloc[2][0:2])
gbc_minus = gbc_training_report.iloc[0, 0]
gbc_plus = gbc_training_report.iloc[1, 0]
gbc_training_report.columns = ['gbc']
# %%
    # Implementing SGDClassifier from sklearn (note: the default 'hinge' loss is used here;
    # loss='log' would be needed for probabilistic prediction values)
sgdc = SGDClassifier(max_iter=1000)
sgdc.fit(X_train_scaled, y_train)
sgdc_preds = sgdc.predict(X_test_scaled)
sgdc_training_report = classification_report(y_test, sgdc_preds, output_dict=True)
sgdc_training_report =
|
pd.DataFrame(sgdc_training_report)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 29 20:24:06 2016
@author: Matt
"""
from Reg_Model import Reg_Model
import pandas as pd
import matplotlib.pyplot as plt
class Game_Scores(object):
"""
    Takes a set of games for training and builds a regression model from them
"""
def __init__(self, schedule, x, target_name):
self.x = x
self.target = schedule[target_name]
self.answers = [1 if x>0 else 0 for x in schedule[target_name]]
self.pred = None
self.win_scores = None
self.rank_order = None
self.model = Reg_Model()
self.model.set_training(self.x, self.target)
self.model.calc_model()
#Returns df of features
def get_x_vars(self):
return self.x
#Returns model
def get_model(self):
return self.model
    #For a given set of test features, returns the predictions
def get_pred(self, test_x):
self.pred = self.model.get_pred(test_x)
return self.pred
#Looks at training data and returns overall accuracy
def get_accuracy(self):
pred = self.model.get_pred(self.x)
win_pred = [1 if x > 0 else 0 for x in pred]
accuracy = sum([1 if x == y else 0 for x, y in zip(win_pred, self.answers)]) * 1.0 / len(self.answers)
return accuracy
#Returns score linked to probability of winning
def get_win_scores(self, test_x):
        if self.pred is None:
self.get_pred(test_x)
self.win_scores = [round((50 + x) / 100, 2) for x in (self.pred)]
return self.win_scores
#Returns cumulative accuracy by win_score
def get_rank_order_acc(self):
        if self.win_scores is None:
self.get_win_scores(self.x)
set_ws = sorted(list(set(self.win_scores)))
self.rank_order =
|
pd.DataFrame(index=set_ws)
|
pandas.DataFrame
|
from SPARQLWrapper import SPARQLWrapper, CSV, JSON, XML, POSTDIRECTLY, POST, __agent__
import warnings
import pandas as pd
import io
import time
import json
import re
import urllib.request
from rdflib import Graph, util
from ratelimit import limits, sleep_and_retry
from functools import lru_cache
from kgextension.sparql_helper_helper import get_initial_query_offset, get_initial_query_limit
def regex_string_generator(attribute, filters, logical_connective = "OR"):
"""#TODO
Args:
attribute ([type]): [description]
filters ([type]): [description]
logical_connective (str, optional): [description]. Defaults to "OR".
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
rules = []
for rule in filters:
rules.append("regex("+attribute+", \""+rule+"\")")
if logical_connective == "OR":
regex_string = " || ".join(rules)
elif logical_connective == "AND":
regex_string = " && ".join(rules)
else:
raise ValueError("Allowed inputs for logical_connective: \"OR\"; \"AND\"")
return regex_string
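# Minimal usage sketch (illustrative only; '?label' is a hypothetical SPARQL variable):
#   regex_string_generator("?label", ["^Berlin", "^Hamburg"])
#   -> 'regex(?label, "^Berlin") || regex(?label, "^Hamburg")'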
class Endpoint():
"""Base Endpoint class.
"""
pass
class RemoteEndpoint(Endpoint):
"""RemoteEndpoint class, that handles remote SPARQL endpoints.
"""
def __init__(self, url, timeout=60, requests_per_min = 100000, retries=10, page_size=0, supports_bundled_mode=True, persistence_file_path="rate_limits.db", agent=__agent__):
"""Configuration of a SPARQL Endpoint.
Args:
url (str): URL of the endpoint.
timeout (int, optional): Defines the time which the endpoint is
given to respond (in seconds). Defaults to 60.
requests_per_min (int, optional): Defines the maximal number of
requests per minute. Defaults to 100000.
retries (int, optional): Defines the number of times a query is
retried. Defaults to 10.
page_size (int, optional): Limits the page size of the results,
since many endpoints have limitations. Defaults to 0.
supports_bundled_mode (boolean, optional): If true, bundled mode
will be used to query the endpoint. Defaults to True.
persistence_file_path (str, optional): Sets the file path for the
database that keeps track of past query activities (to comply
with usage policies). Defaults to "rate_limits.db".
agent (str, optional): The User-Agent for the HTTP request header.
Defaults to SPARQLWrapper.__agent__.
"""
self.url = url
self.timeout = timeout
self.requests_per_min = requests_per_min
self.retries = retries
self.page_size = page_size
self.supports_bundled_mode = supports_bundled_mode
self.persistence_file_path = persistence_file_path
self.query = sleep_and_retry(limits(calls=requests_per_min, period=60, storage=self.persistence_file_path, name='"'+url+'"')(self._query))
self.agent = agent
def _query(self, query, request_return_format = "XML", verbose = False, return_XML=False):
"""Function that queries a user-specified remote SPARQL endpoint with a
        user-specified query and returns the results as a pandas DataFrame.
Args:
query (str): Query that should be sent to the SPARQL endpoint.
request_return_format (str, optional): Requesting a specific return
                format from the SPARQL endpoint (str) - mainly for debugging and
testing. Defaults to "XML".
verbose (bool, optional): Set to True to let the function print
additional information about the returned data - for debugging
and testing. Defaults to False.
return_XML (bool, optional): If True it returns the XML results
instead of a dataframe. Defaults to False.
Raises:
            RuntimeError: Raised when the returned data is neither XML,
                CSV nor JSON, for whatever reason.
Returns:
pd.DataFrame: The query results in form of a DataFrame.
"""
retries_count = 0
while True:
try:
sparql = SPARQLWrapper(self.url, agent=self.agent)
sparql.setRequestMethod(POSTDIRECTLY)
sparql.setMethod(POST)
sparql.setTimeout(self.timeout)
sparql.setQuery(query)
return_formats = {"XML": XML, "CSV": CSV, "JSON": JSON}
requested_format = return_formats[request_return_format]
                # Catch RuntimeWarning that is raised by SPARQLWrapper when returned and requested format do not match.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning)
# Try to query with ReturnFormat that is requested through requested_format and check which format is returned
sparql.setReturnFormat(requested_format)
results_raw = sparql.query()
returned_content_type = results_raw.info()["content-type"]
if verbose:
print(returned_content_type)
# If the returned format is XML, query with requested ReturnFormat = XML and process accordingly
if "application/sparql-results+xml" in returned_content_type:
results = results_raw.convert()
if return_XML:
return results
result_dict = {}
result_index = 0
for result_node in results.getElementsByTagName("result"):
temp_result_dict = {}
for binding in result_node.getElementsByTagName("binding"):
attr_name = binding.getAttribute("name")
for childnode in binding.childNodes:
if childnode.firstChild is not None:
value = childnode.firstChild.nodeValue
temp_result_dict.update({attr_name: value})
result_dict[result_index] = temp_result_dict
result_index += 1
return pd.DataFrame.from_dict(result_dict, orient="index")
# If the returned format is JSON, query with requested ReturnFormat = JSON and process accordingly
elif "application/sparql-results+json" in returned_content_type:
results = sparql.query().convert()
results_df = pd.DataFrame(results["results"]["bindings"])
results_df = results_df.applymap(lambda x: x["value"])
return results_df
# If the returned format is CSV, query with requested ReturnFormat = CSV and process accordingly
elif "text/csv" in returned_content_type:
results = results_raw.convert()
results = io.BytesIO(results)
return
|
pd.read_csv(results, delimiter=",", dtype=str)
|
pandas.read_csv
|
import os
from run_checks import run_checks
import atmodat_checklib.utils.output_directory_util as output_directory
from atmodat_checklib.utils.summary_creation_util import extract_overview_output_json, \
extracts_error_summary_cf_check
import pandas as pd
from atmodat_checklib.utils.env_util import set_env_variables
from git import Repo
from pathlib import Path
import json
prio_dict = {'high_priorities': 'Mandatory', 'medium_priorities': 'Recommended', 'low_priorities': 'Optional'}
udunits2_xml_path, atmodat_cvs = set_env_variables()
os.environ['PYESSV_ARCHIVE_HOME'] = os.path.join(atmodat_cvs, 'pyessv-archive')
os.environ['UDUNITS2_XML_PATH'] = udunits2_xml_path
def run_checks_on_files(tmpdir, ifiles):
atmodat_base_path = os.path.join(str(Path(__file__).resolve().parents[2]), '')
run_checks(ifiles, False, ['atmodat', 'CF'], 'auto', tmpdir, atmodat_base_path)
json_summary, cf_errors = create_output_summary(tmpdir)
passed_checks = {}
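    # passed_checks maps each result JSON file to, per priority level,
    # a pair of [number of checks evaluated, number of checks passed]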
for file_json in json_summary.keys():
passed_checks[file_json] = {}
if isinstance(json_summary[file_json], pd.DataFrame):
for prio in prio_dict.keys():
passed_checks[file_json][prio] = [0, 0]
checks_prio = json_summary[file_json][prio]
for checks in checks_prio:
for check in checks:
passed_checks[file_json][prio][0] += 1
if check['value'][0] == check['value'][1]:
passed_checks[file_json][prio][1] += 1
return passed_checks
def create_output_summary(opath):
"""main function to create summary output"""
json_summary_out, cf_version, cf_errors_out, cf_warns_out, incorrect_formula_term_error = {}, {}, {}, {}, None
files = output_directory.return_files_in_directory_tree(opath)
for file in files:
if file.endswith("_result.json"):
json_summary_out[file] =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
    def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': ["AAAAA", "รรรรร", "รรรรร", "่่่่่"]})
with tm.ensure_clean('test.csv') as path:
            # the default to_csv encoding is utf-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with
|
tm.ensure_clean('test.csv')
|
pandas.util.testing.ensure_clean
|
import os
import matplotlib.pyplot as plt
import pandas as pd
from src.cellData import cellData
from src.cellExtractOCV import cellExtractOCV
from src.cellSim import cellSim
from src.cellSimHyst import cellSimHyst
from src.plotData import plotData
def main():
"""create list of tests available from dataset"""
pathname = (
"datasets/lg-18650hg2/LG_HG2_Original_Dataset_McMasterUniversity_Jan_2020/"
)
temp = "25degC/"
filenames = [
filename
for filename in os.listdir(pathname + temp)
if filename.endswith(".csv")
]
d =
|
pd.DataFrame(filenames)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from matplotlib import cm
import seaborn as sns
from dataframe_tools import merge_two_dataframes_as_of
def relative_sma(price_df: pd.DataFrame, sma_lag: int, max_number_of_na: int = 5) -> pd.DataFrame:
"""Assumes price_df is a DataFrame filled with daily prices as values, tickers as column names and observation dates
    as index. Assumes that sma_lag and max_number_of_na are int. Returns a DataFrame with the simple moving average
(SMA) divided by the spot."""
sma_df = rolling_average(price_df, sma_lag, max_number_of_na)
relative_sma_df = sma_df / price_df.fillna(method='ffill')
return relative_sma_df
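# Rough usage sketch (hypothetical tickers): given a daily price DataFrame
# price_df[['AAA', 'BBB']], relative_sma(price_df, sma_lag=20) returns the 20-day
# simple moving average of each column divided by the (forward-filled) spot price,
# so values above 1 indicate the SMA sits above the current price.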
def realized_volatility(multivariate_price_df: pd.DataFrame = None, multivariate_return_df: pd.DataFrame = None,
vol_lag: {int, list, tuple}=60, return_lag: int = 1, annualized_factor: float = 252, allowed_number_na: int = 5,
aggregate_func_when_multiple_lags: str = 'max') -> pd.DataFrame:
"""
Calculates the realized volatility of each column.
    :param multivariate_price_df: pd.DataFrame of prices; mutually exclusive with multivariate_return_df
    :param multivariate_return_df: pd.DataFrame of returns; mutually exclusive with multivariate_price_df
    :param vol_lag: int, list or tuple of rolling window length(s) used for the volatility calculation
    :param return_lag: int, lag used when converting prices to returns
    :param annualized_factor: float, annualization factor (e.g. 252 for daily observations)
    :param allowed_number_na: int, maximum number of consecutive NaN rows that are rolled forward
    :param aggregate_func_when_multiple_lags: str, how results for multiple lags are combined ('max' or 'avg')
    :return: pd.DataFrame with the realized volatility of each column
"""
if multivariate_price_df is None and multivariate_return_df is None:
raise ValueError('Need to specify multivariate_return_df or multivariate_price_df.')
elif multivariate_return_df is not None and multivariate_price_df is not None:
raise ValueError('Can only specify one of multivariate_return_df and multivariate_price_df.')
return _rolling_calc(multivariate_df=multivariate_price_df if multivariate_return_df is None else multivariate_return_df,
lag_parameter=vol_lag, return_lag=return_lag,
convert_to_returns=multivariate_return_df is None,
function='realized_volatility', aggregate_method=aggregate_func_when_multiple_lags,
max_number_of_na=allowed_number_na, annualized_factor=annualized_factor,
minimum_allowed_lag=2)
def beta(multivariate_price_df: pd.DataFrame = None, multivariate_return_df: pd.DataFrame = None, beta_price_df: pd.DataFrame = None,
beta_lag: {int, list, tuple}=252, return_lag: int = 1, allowed_number_na: int = 5,
aggregate_func_when_multiple_lags: str = 'avg') -> pd.DataFrame:
"""
Calculates the beta of each column with respect to a given price DataFrame.
    :param multivariate_price_df: pd.DataFrame of prices; mutually exclusive with multivariate_return_df
    :param multivariate_return_df: pd.DataFrame of returns; mutually exclusive with multivariate_price_df
    :param beta_price_df: pd.DataFrame with the price series that the betas are measured against
    :param beta_lag: int, list or tuple of rolling window length(s) used for the beta calculation
    :param return_lag: int, lag used when converting prices to returns
    :param allowed_number_na: int, maximum number of consecutive NaN rows that are rolled forward
    :param aggregate_func_when_multiple_lags: str, how results for multiple lags are combined ('max' or 'avg')
    :return: pd.DataFrame with the beta of each column
"""
if multivariate_price_df is None and multivariate_return_df is None:
raise ValueError('Need to specify multivariate_return_df or multivariate_price_df.')
elif multivariate_return_df is not None and multivariate_price_df is not None:
raise ValueError('Can only specify one of multivariate_return_df and multivariate_price_df.')
return _rolling_calc(
multivariate_df=multivariate_price_df if multivariate_return_df is None else multivariate_return_df,
price_data_for_beta_calc_df=beta_price_df,
lag_parameter=beta_lag, return_lag=return_lag,
convert_to_returns=multivariate_return_df is None,
function='beta', aggregate_method=aggregate_func_when_multiple_lags,
max_number_of_na=allowed_number_na,
minimum_allowed_lag=2)
def rolling_average(data_df: pd.DataFrame, avg_lag: int, max_number_of_na: {int, None}=5) -> pd.DataFrame:
"""
Calculates a rolling average. If nan, value is rolled forward except when number of consecutive nan exceeds
max_number_of_na.
:param data_df: pandas.DataFrame
:param avg_lag: int
:param max_number_of_na: int (default None i.e. all nan are rolled forward)
:return: pandas.DataFrame
"""
return _rolling_calc(data_df, avg_lag, False, 'average', max_number_of_na=max_number_of_na, minimum_allowed_lag=1)
def _rolling_calc(multivariate_df: pd.DataFrame, lag_parameter: {int, tuple, list}, convert_to_returns: bool, function: str,
aggregate_method: str = None, max_number_of_na: int = 5, return_lag: int = 1,
annualized_factor: float = 252, price_data_for_beta_calc_df: pd.DataFrame = None, minimum_allowed_lag: int = 2):
"""
Loops through each lag parameter and calculates a function over the DataFrame
:param multivariate_df: pd.DataFrame
:param lag_parameter: int, tuple or list
:param convert_to_returns: bool
:param function: str
:param aggregate_method: str
:param max_number_of_na: int
:param return_lag: int
:param annualized_factor: float
:param price_data_for_beta_calc_df: pd.DataFrame
:param minimum_allowed_lag: int
:return: pd.DataFrame
"""
# check parameters
lag_parameter = _parameter_input_check(lag_parameter, minimum_allowed_lag)
if multivariate_df.shape[0] < max(lag_parameter) + convert_to_returns + return_lag - 1:
raise ValueError('multivariate_df needs to have at least {} rows.'.format(max(lag_parameter) + convert_to_returns + return_lag - 1))
col_list = multivariate_df.columns[multivariate_df.iloc[return_lag:, :].isna().any()].tolist()
col_with_only_values = multivariate_df.columns[~multivariate_df.iloc[return_lag:, :].isna().any()].tolist()
if len(col_with_only_values):
col_list.append(col_with_only_values)
result_df = None
for lag in lag_parameter:
result_sub_df = pd.DataFrame(index=multivariate_df.index)
for col_name in col_list:
if not isinstance(col_name, list):
# if col_name is not a list then that means that there are nan in the column
col_name = [col_name]
df_clean = multivariate_df.loc[:, col_name]
df_clean.dropna(inplace=True)
else:
# list of columns that does not have any nan
df_clean = multivariate_df.loc[:, col_name]
if convert_to_returns:
df_clean = df_clean.pct_change(return_lag)
# here is where the main calculation is done
df_clean = _function_calc(df=df_clean, func_name=function, lag=lag, return_lag=return_lag,
price_data_for_beta_calc_df=price_data_for_beta_calc_df,
annualized_factor=annualized_factor)
result_sub_df = result_sub_df.join(df_clean)
result_sub_df = result_sub_df[list(multivariate_df)]
if result_df is None:
result_df = result_sub_df
elif aggregate_method.lower() in ['max', 'maximum']:
result_df = pd.concat([result_df, result_sub_df]).max(level=0, skipna=False)
elif aggregate_method.lower() in ['mean', 'average', 'avg']:
result_df = pd.concat([result_df, result_sub_df]).mean(level=0, skipna=False)
else:
if aggregate_method is None:
raise ValueError("Need to specify aggregate_method when specifying a list of lag parameters.")
else:
raise ValueError("'{}' is not a recognized aggregation function.".format(aggregate_method.lower()))
result_df.fillna(method='ffill', inplace=True)
result_df = _set_nan_for_missing_data(multivariate_df, result_df, max_number_of_na=max_number_of_na)
return result_df
def _function_calc(df: pd.DataFrame, func_name: str, **kwargs):
if func_name == 'realized_volatility':
df_clean = df.rolling(window=kwargs['lag']).std() * (kwargs['annualized_factor'] ** 0.5)
elif func_name == 'average':
df_clean = df.rolling(window=kwargs['lag']).mean()
elif func_name == 'beta':
if kwargs['price_data_for_beta_calc_df'] is None:
raise ValueError('Need to specify data_for_beta_calc_df when calculating betas.')
else:
df_clean = merge_two_dataframes_as_of(df, kwargs['price_data_for_beta_calc_df'], 'used_for_beta_calc')
df_clean.iloc[:, -1] = df_clean.iloc[:, -1].pct_change(kwargs['return_lag'])
covariance_df = df_clean.rolling(window=kwargs['lag']).cov(df_clean.iloc[:, -1])
variance_df = df_clean.iloc[:, -1].rolling(window=kwargs['lag']).var()
df_clean = covariance_df.divide(variance_df, axis='index')
df_clean = df_clean.iloc[:, :-1] # ignore the column furthest to the right
else:
raise ValueError("'{}' is not a recognized function.".format(func_name.lower()))
return df_clean
def _parameter_input_check(param: {int, tuple, list}, minimum_value: int):
"""
Checks and converts the parameter to a list if necessary.
:param param: int, list
:param minimum_value: int
    :return: list
"""
# convert to list if applicable
try:
param[0]
except TypeError:
param = [param]
# check value
if min(param) < minimum_value:
raise ValueError('Parameter value needs to be greater or equal to {}.'.format(minimum_value))
return param
def convert_daily_return_to_daily_price_df(multivariate_daily_return_df: pd.DataFrame, initial_value: float = 1.0):
"""
Takes a multivariate DataFrame assumed to contain daily returns and recomputes a normalized performance DataFrame
:param multivariate_daily_return_df: pd.DataFrame
:param initial_value: float
:return: pd.DataFrame
"""
multivariate_price_df = multivariate_daily_return_df.shift(-1) # shift the daily returns backwards
multivariate_price_df[~multivariate_price_df.isnull()] = 1 # where there is a shifted return set value to 1
multivariate_price_df.iloc[-1, :] = 1 # last rows should be 1 since the shifted value is always nan
multivariate_price_df += multivariate_daily_return_df.fillna(0) # 1 + daily return
multivariate_price_df = multivariate_price_df.cumprod() # (1 + R1) * (1 + R2) * ...
multivariate_price_df *= initial_value
return multivariate_price_df
def _set_nan_for_missing_data(original_multivariate_df: pd.DataFrame, calculated_value_df: pd.DataFrame,
max_number_of_na: int)->pd.DataFrame:
"""
After calculating e.g. Volatility, this script sets the values to NaN if the original DataFrame had a number of
consecutive rows of NaN above a threshold.
:param original_multivariate_df: DataFrame
:param calculated_value_df: DataFrame
:param max_number_of_na: int
:return: DataFrame
"""
# set value to nan if the number of consecutive nan exceeds max_number_of_na
adjustment_df = original_multivariate_df.rolling(window=max_number_of_na + 1, min_periods=1).mean()
eligibility_df = np.where(adjustment_df.isna(), np.nan, 1)
return calculated_value_df * eligibility_df
def rolling_drawdown(price_df: pd.DataFrame, look_back_period: int = None) -> pd.DataFrame:
"""Assumes that price_df is a DataFrame and look_back_period is an int. If look_back_period is not assigned, the
'peak/maximum' will be observed continuously. Returns a DataFrame containing the drawdown for each underlying i.e.
'price' / 'maximum priced over look back period' - 1."""
if look_back_period is None:
look_back_period = len(price_df.index)
if look_back_period < 1:
raise ValueError("look_back_lag' needs to be larger or equal to 1.")
price_df = price_df.fillna(method='ffill').copy()
rolling_max_df = price_df.rolling(window=look_back_period, min_periods=1).max()
drawdown_df = price_df / rolling_max_df - 1.0
return drawdown_df
def exponentially_weighted_return(price_df: pd.DataFrame, lambda_: float) -> pd.DataFrame:
"""
Calculates the exponentially weighted returns
:param price_df: pd.DataFrame containing the closing levels
:param lambda_: the weight
:return: pd.DataFrame
"""
price_return_df = price_df.pct_change()
price_return_df = price_return_df.iloc[1:, :]
number_days = price_return_df.shape[0]
exp_weighting_var = [np.exp(-lambda_ / number_days * (1 + t_day)) for t_day in range(number_days - 1, -1, -1)]
exp_weighting_s = pd.Series(index=price_return_df.index, data=exp_weighting_var)
return price_return_df.mul(exp_weighting_s, axis=0)
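# Note on the weighting above: the oldest return is scaled by exp(-lambda_) and the
# most recent one by exp(-lambda_ / number_days), so recent observations receive the
# largest weights; the weights are not normalized to sum to one.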
def maximum_drawdown(price_df: pd.DataFrame, look_back_period: int = None) -> pd.Series:
"""Assumes that price_df is a DataFrame and look_back_period is an int. If look_back_period is not assigned, the
'peak/maximum' will be observed continuously. Returns a Series containing the maximum drawdown for each underlying
i.e. the lowest 'price' / 'maximum priced over look back period' - 1 observed."""
drawdown_df = rolling_drawdown(price_df, look_back_period)
return drawdown_df.min()
def plot_pairwise_correlation(df: pd.DataFrame):
""" Source: https://seaborn.pydata.org/examples/many_pairwise_correlations.html"""
sns.set(style="white")
corr = df.corr() # Compute the correlation matrix
mask = np.triu(np.ones_like(corr, dtype=np.bool)) # Generate a mask for the upper triangle
cmap = sns.diverging_palette(220, 10, as_cmap=True) # Generate a custom diverging colormap
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
def _check_and_merge_price_weight_df(price_df: pd.DataFrame, weight_df: pd.DataFrame) -> pd.DataFrame:
"""(Used by 'index_calculation')
Assumes price_df and weight_df are both DataFrames. Checks if the two DataFrames have the same column headers and
returns adjusted price and weight DataFrame to have a valid start date."""
price_df = price_df.reindex(sorted(price_df.columns), axis=1) # sort column headers
weight_df = weight_df.reindex(sorted(weight_df.columns), axis=1) # sort column headers
price_df.fillna(method='ffill', inplace=True) # forward fill using previous price if price is NaN
if list(price_df) != list(weight_df):
raise ValueError('The tickers (column headers) of price_df and weight_df are not the same.')
weight_df['rebalance'] = range(len(weight_df.index)) # counter (indicator) for rebalance date
price_index_reset_df = price_df.reset_index()
weight_index_reset_df = weight_df.reset_index()
left_on_col_name = list(price_index_reset_df)[0]
right_on_col_name = list(weight_index_reset_df)[0]
price_weight_df = pd.merge_asof(price_index_reset_df, weight_index_reset_df, left_on=left_on_col_name,
right_on=right_on_col_name,
suffixes=['_price', '_weight']) # merge the price and weight DataFrames
price_weight_df.set_index(price_weight_df[left_on_col_name], inplace=True) # set dates as index
price_weight_df.drop([left_on_col_name, right_on_col_name], inplace=True, axis=1) # remove old 'index' column
weight_column_names = [col_name for col_name in list(price_weight_df) if '_weight' in col_name]
price_weight_df = price_weight_df.dropna(subset=weight_column_names) # remove rows where weights are NaN
price_weight_df['rebalance'] = price_weight_df['rebalance'].diff() # 1: new weight, else 0
price_weight_df.iloc[0, -1] = 1
return price_weight_df
def _calculate_performance(weight_price_df: pd.DataFrame) -> pd.DataFrame:
"""(Used by 'index_calculation')
Assumes weight_price_df is a DataFrame. Returns a new DataFrame with columns containing the performance."""
tickers = [col_name.replace('_price', '') for col_name in list(weight_price_df) if '_price' in col_name]
# Get the price at each rebalance date and then roll the value
for ticker in tickers:
weight_price_df[ticker + '_price_last_rbd'] = weight_price_df[ticker + '_price'] * \
(weight_price_df['rebalance'] == 1)
weight_price_df[ticker + '_price_last_rbd'].replace(0, np.nan, inplace=True)
weight_price_df.fillna(method='ffill', inplace=True) # forward fill
# Calculate the performance
performance_col_name = [ticker + '_performance' for ticker in tickers]
weight_price_df[performance_col_name] = pd.DataFrame(data=weight_price_df.filter(regex='_price$').values /
weight_price_df.filter(regex='_price_last_rbd$').shift(1).values,
index=weight_price_df.index)
# Calculate the weighted performance
weighted_performance_col_names = [ticker + '_weighted_return' for ticker in tickers]
weight_price_df[weighted_performance_col_names] = pd.DataFrame(data=weight_price_df.filter(regex='_weight$').shift(1).values * \
(weight_price_df.filter(regex='_performance$').values - 1.0),
index=weight_price_df.index)
return weight_price_df
def index_calculation(price_df: pd.DataFrame, weight_df: pd.DataFrame, transaction_cost: float = 0, fee: float = 0,
initial_amount: float = 100.0) -> pd.DataFrame:
"""Assumes price_df and weight_df are both DataFrames that have the same column headers, transaction_cost and fee
are floats. Returns an index as a DataFrame."""
if transaction_cost < 0:
raise ValueError('transaction_cost needs to be equal or greater to 0.')
weight_price_df = _check_and_merge_price_weight_df(price_df, weight_df)
index_calculation_df = _calculate_performance(weight_price_df)
index_calculation_df['transaction_cost'] = index_calculation_df.filter(regex='_weight$').diff().abs().sum(axis=1) \
* transaction_cost
index_calculation_df['gross_return'] = index_calculation_df.filter(regex='_weighted_return$').sum(axis=1)
# Calculate the index
index_calculation_df['index'] = np.nan # Initialize the column
index_calendar = index_calculation_df.index
index_last_rbd = initial_amount
last_rbd = index_calculation_df.index[0]
index_calculation_df.loc[last_rbd, 'index'] = index_last_rbd
for date in index_calendar[1:]:
accumulated_fee = (date - last_rbd).days / 360.0 * fee
index_calculation_df.loc[date, 'index'] = index_last_rbd * (1.0 + index_calculation_df.loc[date, 'gross_return']
- accumulated_fee
- index_calculation_df.loc[date, 'transaction_cost'])
if index_calculation_df.loc[date, 'rebalance']: # if it is a rebalance date
index_last_rbd = index_calculation_df.loc[date, 'index'] # new index since last rebalance date
last_rbd = date # new rebalance date
return index_calculation_df
def index_daily_rebalanced(multivariate_daily_returns: pd.DataFrame, weights: pd.DataFrame,
transaction_costs: float = 0, rolling_fee_pa: float = 0, weight_smoothing_lag: int = 1,
weight_observation_lag: int = 1, initial_value: float = 100, volatility_target: float = None,
volatility_lag: {int, list, tuple}=60, risky_weight_cap: float = 1,
market_price_df: pd.DataFrame = None, beta_lag: {int, list, tuple}=252,
beta_hedge_carry_cost_pa: float = 0.0, beta_return_lag: int = 1):
# calculate the gross return of the index
    multivariate_daily_returns.iloc[1:, :] = multivariate_daily_returns.iloc[1:, :].fillna(0)
num_instruments = multivariate_daily_returns.shape[1]
index_result = merge_two_dataframes_as_of(multivariate_daily_returns, weights, '_WEIGHT')
index_result.iloc[:, num_instruments:] = index_result.iloc[:, num_instruments:].rolling(window=weight_smoothing_lag, min_periods=1).mean() # smooth the weights to reduce turnover
index_result[[col_name + '_WEIGHTED_RETURN' for col_name in list(multivariate_daily_returns)]] = index_result.iloc[:, :num_instruments] * index_result.iloc[:, num_instruments:].shift(weight_observation_lag).values
# make the index 'market-neutral' by adding a short position where the allocation depends on the realized beta
if market_price_df is not None:
index_result = _add_beta_hedge(index_result=index_result, multivariate_daily_returns=multivariate_daily_returns,
market_price_df=market_price_df, beta_lag=beta_lag,
beta_return_lag=beta_return_lag, num_instruments=num_instruments,
weight_observation_lag=weight_observation_lag)
market_neutral = True
num_instruments += 1
else:
market_neutral = False
# index_result = _add_beta_hedge(index_result)
index_result['GROSS_INDEX_RETURN'] = index_result.iloc[:, 2 * num_instruments:].sum(axis=1, skipna=False)
# add volatility target mechanism if applicable
if volatility_target:
index_result = _add_volatility_target(index_result, volatility_target, volatility_lag, risky_weight_cap, num_instruments,
weight_observation_lag)
# calculate the gross index
index_result['GROSS_INDEX'] = initial_value * (1 + index_result['GROSS_INDEX_RETURN']).cumprod()
start_of_index_i = index_result['GROSS_INDEX_RETURN'].index.get_loc(index_result['GROSS_INDEX_RETURN'].first_valid_index()) - 1
index_result.iloc[start_of_index_i, -1] = initial_value
# adjust index for transaction costs and index fees if any
if transaction_costs != 0 or rolling_fee_pa != 0 or market_neutral * beta_hedge_carry_cost_pa != 0:
# add a column with the net index return
index_result = _add_net_index_return(index_result=index_result, transaction_costs=transaction_costs,
rolling_fee_pa=rolling_fee_pa, number_of_instruments=num_instruments,
beta_carry_cost=market_neutral * beta_hedge_carry_cost_pa,
weight_observation_lag=weight_observation_lag)
# calculate the net index
index_result['NET_INDEX'] = initial_value * (1 + index_result['NET_INDEX_RETURN']).cumprod()
index_result.iloc[start_of_index_i, -1] = initial_value
return index_result
def _add_beta_hedge(index_result: pd.DataFrame, multivariate_daily_returns: pd.DataFrame, market_price_df: pd.DataFrame,
beta_lag: {int, list}, beta_return_lag: int, num_instruments: int, weight_observation_lag: int):
# add the daily returns of the market instrument
index_result = merge_two_dataframes_as_of(index_result, market_price_df.pct_change(), left_suffix='BETA_INSTRUMENT')
# calculate the weight of the short position
if beta_return_lag > 1:
# convert the daily returns back to performance data
multivariate_price_df = multivariate_daily_returns.shift(-1) # shift the daily returns backwards
multivariate_price_df[~multivariate_price_df.isnull()] = 1 # where there is a shifted return set value to 1
multivariate_price_df.iloc[-1, :] = 1 # last rows should be 1 since the shifted value is always nan
multivariate_price_df += multivariate_daily_returns.fillna(0) # 1 + daily return
multivariate_price_df = multivariate_price_df.cumprod() # (1 + R1) * (1 + R2) * ...
beta_per_stock_df = beta(multivariate_price_df=multivariate_price_df, beta_price_df=market_price_df,
beta_lag=beta_lag, return_lag=beta_return_lag)
else:
beta_per_stock_df = beta(multivariate_return_df=multivariate_daily_returns, beta_price_df=market_price_df,
beta_lag=beta_lag)
# calculate the weighted average across all stock betas
weighted_beta_df = pd.DataFrame(
data=(beta_per_stock_df * index_result.iloc[:, num_instruments: 2 * num_instruments].values).sum(axis=1,
skipna=False),
columns=['BETA_WEIGHT'])
weighted_beta_df *= -1 # since you are shorting the market
index_result = index_result.join(weighted_beta_df)
# calculate the short beta position
index_result[list(index_result)[-2] + '_WEIGHTED_RETURN'] = index_result.iloc[:, -2] * \
index_result.iloc[:,-1]\
.shift(weight_observation_lag).values
# rearrange the columns
instrument_columns = list(index_result)[:num_instruments]
instrument_columns.append(list(index_result)[-3]) # add the beta instrument
weight_columns = list(index_result)[num_instruments: 2 * num_instruments]
weight_columns.append(list(index_result)[-2]) # add the beta weight
weighted_return_columns = [col_name for col_name in list(index_result) if col_name.endswith('_WEIGHTED_RETURN')]
all_col_names = instrument_columns + weight_columns + weighted_return_columns
index_result = index_result[all_col_names]
return index_result
def _add_volatility_target(index_result: pd.DataFrame, volatility_target: float, volatility_lag: {int, list},
risky_weight_cap: float, number_of_instruments: int, weight_observation_lag: int):
# calculate the risky weight
realized_volatility_gross_index = realized_volatility(multivariate_return_df=index_result[['GROSS_INDEX_RETURN']], vol_lag=volatility_lag)
risky_weight = volatility_target / realized_volatility_gross_index
risky_weight[risky_weight >= risky_weight_cap] = risky_weight_cap
index_result = index_result.join(pd.DataFrame(data=risky_weight.values, index=risky_weight.index, columns=['RISKY_WEIGHT']))
# add new weights columns with weights adjusted based on the risky weight
weight_post_vt_col_names = [col_name + '_WEIGHT_POST_VT' for col_name in list(index_result)[:number_of_instruments]]
index_result[weight_post_vt_col_names] = index_result.iloc[:, number_of_instruments: 2 * number_of_instruments].multiply(index_result['RISKY_WEIGHT'], axis=0)
# adjust the weighted returns and gross index returns
weighted_return_col_names = [col_name + '_WEIGHTED_RETURN' for col_name in list(index_result)[:number_of_instruments]]
index_result[weighted_return_col_names] = index_result.iloc[:, :number_of_instruments] * index_result.loc[:, weight_post_vt_col_names].shift(weight_observation_lag).values
index_result['GROSS_INDEX_RETURN'] = index_result.loc[:, weighted_return_col_names].sum(axis=1, skipna=False)
return index_result
def _add_net_index_return(index_result: pd.DataFrame, transaction_costs: float, rolling_fee_pa: float,
number_of_instruments: int, beta_carry_cost: float, weight_observation_lag: int):
# calculate the index net of transaction costs
weight_col_names = [col_name for col_name in list(index_result) if col_name.endswith('_WEIGHT_POST_VT')]
if not len(weight_col_names):
weight_col_names = list(index_result)[number_of_instruments: 2 * number_of_instruments]
abs_weight_delta = index_result[weight_col_names].diff().abs().sum(axis=1)
index_result['NET_INDEX_RETURN'] = index_result['GROSS_INDEX_RETURN'] - transaction_costs * abs_weight_delta.values
# calculate the index net of index fees
dt = [None] + [(index_result.index[n] - index_result.index[n - 1]).days / 365 for n in
range(1, len(index_result.index))]
dt_s = pd.Series(data=dt, index=index_result.index)
index_result['NET_INDEX_RETURN'] -= rolling_fee_pa * dt_s
# calculate the index net of the rolling cost of carrying the short beta position
index_result['NET_INDEX_RETURN'] += index_result['BETA_WEIGHT'].shift(weight_observation_lag) * dt_s.values * beta_carry_cost
return index_result
def monthly_return_table(price_df: pd.DataFrame, include_first_monthly_return: bool = True) -> {list, pd.DataFrame}:
"""Assumes price_df is a DataFrame. Returns a DataFrame with monthly returns and yearly returns. If price_df has
more than one column, script returns a list filled with DataFrames."""
month_name_dict = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'Jun': 5, 'Jul': 6, 'Aug': 7, 'Sep': 8,
'Oct': 9, 'Nov': 10, 'Dec': 11} # dictionary used to name the columns in the table
price_df = price_df.copy()
tickers = list(price_df)
price_df['Month'] = price_df.index.strftime('%b')
price_df['Year'] = price_df.index.strftime('%Y')
# within each month, roll forward if there is nan
clean_price_df = price_df.groupby(['Month', 'Year'], as_index=False).apply(lambda group: group.ffill())
monthly_price_df = clean_price_df.resample('BM').apply(lambda x: x[-1]) # look at last day of each month
# calculate the returns only for the price columns and not month and year columns
monthly_return_df = monthly_price_df.apply(lambda x: x.pct_change() if x.name in tickers else x)
if include_first_monthly_return: # calculate the return of the first 'broken' month
first_available_price_df = clean_price_df.groupby(['Month', 'Year'], as_index=False).apply(lambda group: group.bfill()).resample('BM').apply(lambda x: x[0])
for ticker in tickers:
# this looks messy but it works :)
first_available_value = first_available_price_df.loc[first_available_price_df[ticker].notna().idxmax(),
ticker]
first_end_of_month_index_pos = monthly_return_df.index.get_loc(monthly_return_df[ticker].notna().idxmax())
first_available_end_of_month_value = monthly_price_df.iloc[first_end_of_month_index_pos - 1,
tickers.index(ticker)]
first_month_return = first_available_end_of_month_value / first_available_value - 1.0
monthly_return_df.iloc[first_end_of_month_index_pos - 1, tickers.index(ticker)] = first_month_return
column_names = list(set(month_name_dict.keys()) & set(monthly_return_df['Month'].values)) # intersection
column_names.sort(key=lambda name: month_name_dict[name]) # sort the columns in monthly order
if len(tickers) == 1: # only one underlying
monthly_return_table_df = monthly_return_df.pivot(index='Year', columns='Month', values=tickers[0])
monthly_return_table_df = monthly_return_table_df[column_names] # correct the column order
monthly_return_table_df['Yearly return'] = monthly_return_table_df.add(1.0).fillna(1.0).cumprod(axis=1).iloc[:, -1] - 1.0
return monthly_return_table_df
else:
monthly_return_table_df_list = []
for ticker in tickers:
monthly_return_table_df = monthly_return_df.pivot(index='Year', columns='Month', values=ticker)
monthly_return_table_df = monthly_return_table_df[column_names] # correct the column order
monthly_return_table_df['Yearly return'] = monthly_return_table_df.add(1.0).fillna(1.0).cumprod(axis=1).iloc[:, -1] - 1.0
monthly_return_table_df_list.append(monthly_return_table_df)
return monthly_return_table_df_list
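# Minimal usage sketch (illustrative): for a single-column daily price DataFrame,
# monthly_return_table(price_df) returns a Year x Month table of monthly returns plus
# a 'Yearly return' column; with several price columns a list of such tables is
# returned, one per ticker, in column order.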
def return_and_risk_analysis(underlying_price_df: pd.DataFrame, has_benchmark=False, print_results=True,
start_date=None, end_date=None, normalize: bool = True) -> dict:
"""Assumes that underlying_price_df is a DataFrame with prices in each column and dates as index, has_benchmark is
    boolean. Calculates annual returns, annual volatility, drawdown and return distributions.
    If has_benchmark is True (only works if there are two columns) the function will return active return and information
    ratio. Returns a dictionary with names (string) as keys and DataFrames as values. This can later easily be saved in
    an Excel workbook."""
if has_benchmark and len(underlying_price_df.columns) != 2:
raise ValueError("Price DataFrame needs to have only two columns: 1st is your strategy and the 2nd is your "
"benchmark. Currently the DataFrame has {} column(s).".format(len(underlying_price_df.columns)))
underlying_price_df = underlying_price_df.copy()
underlying_price_df.dropna(inplace=True) # drops each row if there exists a NaN in either column
if start_date:
underlying_price_df = underlying_price_df[start_date:]
if end_date:
underlying_price_df = underlying_price_df[:end_date]
if normalize:
performance_df = underlying_price_df / underlying_price_df.iloc[0, :] * 100.0
else:
performance_df = underlying_price_df
# annual return
rolling_1y_return_df = underlying_price_df.pct_change(252).dropna()
avg_1y_return_s = rolling_1y_return_df.mean()
# annual volatility
rolling_1y_volatility_df = underlying_price_df.pct_change().rolling(window=252).std().dropna() * math.sqrt(252)
avg_1y_volatility_s = rolling_1y_volatility_df.mean()
# sharpe ratio
sharpe_ratio_s = avg_1y_return_s / avg_1y_volatility_s
# maximum drawdown
rolling_1y_drawdown_df = rolling_drawdown(underlying_price_df, 252)
max_drawdown_s = maximum_drawdown(underlying_price_df)
# combine the Series into a DataFrame
risk_return_table_df = pd.concat([avg_1y_return_s, avg_1y_volatility_s, sharpe_ratio_s, max_drawdown_s],
axis=1).transpose()
# setup a dictionary with sheet names as keys and DataFrames as values
sheet_name_df_dict = {'Performance': performance_df, 'Risk and return': risk_return_table_df,
'Rolling 1Y return': rolling_1y_return_df, 'Rolling 1Y volatility': rolling_1y_volatility_df,
'Rolling 1Y drawdown': rolling_1y_drawdown_df}
if has_benchmark:
# calculate the rolling active return and information ratio
rolling_1y_active_return_s = rolling_1y_return_df.iloc[:, 0] - rolling_1y_return_df.iloc[:, 1]
rolling_standard_error_s = rolling_1y_active_return_s.rolling(window=252).std()
rolling_1y_information_ratio_s = (rolling_1y_active_return_s / rolling_standard_error_s).dropna()
avg_1y_active_return_df = pd.DataFrame(data=[[rolling_1y_active_return_s.mean(), '--']],
columns=list(underlying_price_df))
avg_1y_information_ratio_df = pd.DataFrame(data=[[rolling_1y_information_ratio_s.mean(), '--']],
columns=list(underlying_price_df))
# add active returns and information ratio to the table DataFrame
risk_return_table_df =
|
pd.concat([risk_return_table_df, avg_1y_active_return_df, avg_1y_information_ratio_df])
|
pandas.concat
|
from unittest import TestCase
from os import path as os_path
import shutil
from pandas import DatetimeIndex, DataFrame, Series, Timedelta, infer_freq, \
Timestamp
import numpy as np
from plotly.graph_objects import Figure
from matplotlib.axes import Axes
from timeatlas import TimeSeries, Metadata
from timeatlas.config.constants import *
class TestTimeSeries(TestCase):
def setUp(self) -> None:
# Create a time indexed series
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
self.my_data = Series([0.4, 1.0, 0.7, 0.6], index=index).to_frame()
# Create metadata
my_unit = {
"name": "power",
"symbol": "W",
"data_type": "float"
}
my_coordinates = {
"lat": 46.796611,
"lon": 7.147563
}
my_dict = {
"unit": my_unit,
"coordinates": my_coordinates
}
self.my_metadata = Metadata(my_dict)
# self.my_time_series = TimeSeries(self.my_series, self.my_metadata)
self.my_time_series = TimeSeries(self.my_data)
# Define a target directory
self.target_dir = "data/test-export"
def test__init__is_instance(self):
my_time_series = TimeSeries()
self.assertIsInstance(my_time_series, TimeSeries,
"The TimeSeries hasn't the right type")
def test__init__has_right_types(self):
# Add some data
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
my_series = DataFrame([0.4, 1.0, 0.7, 0.6], index=index)
my_metadata = Metadata()
my_ts = TimeSeries(my_series, my_metadata)
# Check types
self.assertIsInstance(my_ts._data, DataFrame,
"The TimeSeries series is not a Pandas DataFrame")
self.assertIsInstance(my_ts.metadata, Metadata,
"The TimeSeries Metadata hasn't got the right type")
def test__init__contains_metadata(self):
# Add some data
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
my_series = DataFrame([0.4, 1.0, 0.7, 0.6], index=index)
my_metadata = Metadata()
my_ts = TimeSeries(my_series, my_metadata)
# Check types
self.assertNotEqual(my_ts.metadata, None,
"The TimeSeries Metadata is probably None")
def test__init__has_values_as_column_name(self):
index =
|
DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
|
pandas.DatetimeIndex
|
#!/usr/bin/env python3
"""Exploratory Data Analysis Module for Alzheimer's Capstone 1.
This module contains functions used for Data Storytelling.
The functions within wrangle and format data for visualizations.
Inputs for these functions can be obtained using adnidatawrangling module.
Required modules for all functions to run: pandas, numpy, matplotlib.pyplot,
seaborn all using standard namespaces.
"""
if 'pd' not in globals():
import pandas as pd
if 'np' not in globals():
import numpy as np
if 'plt' not in globals():
import matplotlib.pyplot as plt
if 'sns' not in globals():
import seaborn as sns
sns.set()
def get_bl_data(adni_comp, clin_data, scan_data):
"""This function extracts the data from the baseline visit only for each patient.
Supply the three dataframes adni_comp, clin_data, and scan_data as input.
"""
# extract the baseline data only
adni_bl = adni_comp[adni_comp.EXAMDATE == adni_comp.EXAMDATE_bl]
clin_bl = clin_data[clin_data.EXAMDATE == clin_data.EXAMDATE_bl]
scan_bl = scan_data[scan_data.EXAMDATE == scan_data.EXAMDATE_bl]
# return the three dataframes
return adni_bl, clin_bl, scan_bl
def plot_gender_counts(adni_bl):
"""This function plots gender counts from the complete data.
Supply adni_bl as input to ensure only 1 row per patient.
"""
# construct and show the plot
adni_bl['PTGENDER'].value_counts().plot(kind='bar', rot=0)
plt.xlabel('Gender')
plt.ylabel('Number of Patients')
plt.title('Number of Patients of Each Gender')
plt.show()
def plot_bl_diag(adni_bl):
"""This function plots baseline data.
Supply adni_bl as the argument and unpack the x, y variables for plotting.
"""
x = ['CN', 'SMC', 'EMCI', 'LMCI', 'AD']
y = [adni_bl.DX_bl.value_counts()['CN'], adni_bl.DX_bl.value_counts()['SMC'],
adni_bl.DX_bl.value_counts()['EMCI'], adni_bl.DX_bl.value_counts()['LMCI'],
adni_bl.DX_bl.value_counts()['AD']]
# construct and show the plot
sns.barplot(x=x, y=y)
plt.xlabel('Diagnosis')
plt.ylabel('Number of Patients')
plt.title('Baseline Diagnosis')
plt.show()
def plot_final_diag(adni_comp):
"""This function extracts the final diagnosis counts for plotting.
Supply adni_comp as input and extract the x, y data for plotting.
"""
# filter the most advanced diagnosis for each patient
dx_final = adni_comp.groupby('RID')['DX'].max()
# get the frequency of occurrence
dx_counts = dx_final.value_counts()
# extract the data and assign to x, y
x = ['CN', 'MCI', 'AD']
y = [dx_counts['CN'], dx_counts['MCI'], dx_counts['AD']]
# construct and show the plot
sns.barplot(x=x, y=y)
plt.xlabel('Diagnosis')
plt.ylabel('Number of Patients')
plt.title('Final Diagnosis')
plt.show()
def plot_bl_final_diag(adni_comp):
"""This function extracts baseline and final diagnosis data and plots it.
Provide the adni_comp df as input and extract 'diag' df for plotting.
Use the following call to plot:
sns.barplot(x='diagnosis', y='counts', hue='visit', data=diag)
Add appropriate titles, labels as needed.
"""
# filter the most advanced diagnosis for each patient
dx_final = adni_comp.groupby('RID')['DX'].max()
# get the frequency of occurrence
dx_counts = dx_final.value_counts()
# extract the baseline diagnosis counts, using bl2
DX_bl2 = adni_comp.groupby('RID')['DX_bl2'].min()
DX_bl2_counts = DX_bl2.value_counts()
# join baseline and final diagnosis data for comparison
counts = [DX_bl2_counts[1], DX_bl2_counts[0], DX_bl2_counts[2], dx_counts[2], dx_counts[1], dx_counts[0]]
dx_comp = {'visit': ['Baseline', 'Baseline', 'Baseline', 'Final', 'Final', 'Final'],
'diagnosis': ['CN', 'MCI', 'AD', 'CN', 'MCI', 'AD'],
'counts': counts}
# create dataframe using dx_comp
diag =
|
pd.DataFrame(dx_comp)
|
pandas.DataFrame
|
#%% [markdown]
## Project Name: covid_misinformation
### Program Name: CoronaV_Trends.py
### Purpose: To download google trends data related to coronavirus.
##### Date Created: Apr 8th 2020
####
# Pytrends Documentation: https://github.com/GeneralMills/pytrends
#%% [markdown]
from IPython import get_ipython
get_ipython().magic('reset -sf')
import datetime
from datetime import datetime as dt
from datetime import date
import os
import pathlib
import colorlover as cl
import plotly.graph_objs as go
import chart_studio.plotly as py
import plotly.express as px
import pandas as pd
from pytrends.request import TrendReq
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
pytrends = TrendReq(hl='en-US', tz=360, retries=2, backoff_factor=0.1)
#%% [markdown]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Bat soup theory~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
today=datetime.date(2020,4,26)
search_time='2020-01-01 '+str(today)
searches_bat=[
'bat soup',
'coronavirus bat soup',
'china bat soup',
'chinese bat soup',
'wuhan bat soup',
'bat soup virus',
]
groupkeywords = list(zip(*[iter(searches_bat)]*1))
groupkeywords = [list(x) for x in groupkeywords]
# Download search interest of bat key words
dicti = {}
i = 1
for trending in groupkeywords:
pytrends.build_payload(
trending,
timeframe = search_time,
)
dicti[i] = pytrends.interest_over_time()
i+=1
result = pd.concat(dicti, axis=1)
result.columns = result.columns.droplevel(0)
result = result.drop('isPartial', axis = 1)
#result['date']=result.index.date
result.to_csv(os.path.join(APP_PATH, 'data', 'GoogleTrends','GT_bat_global_'+str(today)+'.csv'), header=True)
#%% [markdown]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Wuhan Lab theory~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
today=datetime.date(2020,5,4)
search_time='2020-01-01 '+str(today)
searches_wuhanlab=[
'wuhan virus lab',
'wuhan lab corona virus',
'virus lab in wuhan',
'wuhan bio lab',
'wuhan virology lab',
'wuhan p4 lab',
'wuhan level 4 lab',
'wuhan bsl-4 lab',
]
groupkeywords = list(zip(*[iter(searches_wuhanlab)]*1))
groupkeywords = [list(x) for x in groupkeywords]
#%% [markdown]
# Download search interest of wuhanlab key words
dicti = {}
i = 1
for trending in groupkeywords:
pytrends.build_payload(
trending,
timeframe = search_time,
)
dicti[i] = pytrends.interest_over_time()
i+=1
result = pd.concat(dicti, axis=1)
result.columns = result.columns.droplevel(0)
result = result.drop('isPartial', axis = 1)
result.to_csv(os.path.join(APP_PATH, 'data', 'GoogleTrends','GT_wuhanlab_global_'+str(today)+'.csv'), header=True)
#%% [markdown]
# Select one week Jan 24th - Jan 30th to download google trends by state
search_times=[
'2020-01-24 2020-01-24',
'2020-01-25 2020-01-25',
'2020-01-26 2020-01-26',
'2020-01-27 2020-01-27',
'2020-01-28 2020-01-28',
'2020-01-29 2020-01-29'
]
#search_times=[
# '2020-04-14 2020-4-14',
# '2020-04-15 2020-4-15',
# '2020-04-16 2020-4-16',
# '2020-04-17 2020-4-17',
# '2020-04-18 2020-4-18',
# '2020-04-19 2020-4-19',
#]
#%% [markdown]
j=1
for trending in groupkeywords:
dicti = {}
i = 1
for d in search_times:
pytrends.build_payload(trending, geo='US', timeframe=d)
dicti[i]=pytrends.interest_by_region( inc_low_vol=True, inc_geo_code=False)
dicti[i]['date']=search_times[i-1][:10]
i+=1
if j==1:
result = pd.concat(dicti, axis=0)
result = result.reset_index()
result = result.drop(['level_0'], axis=1)
else:
x=pd.concat(dicti, axis=0)
x=x.reset_index().drop(['level_0', 'geoName','date'], axis=1)
result=
|
pd.concat([result,x], axis=1)
|
pandas.concat
|
import pytest
import pandas as pd
from collections import ChainMap
import datetime
import pyarrow as pa
from arize.pandas.logger import Schema
from arize.utils.types import Environments, ModelTypes
from arize.pandas.validation.validator import Validator
import arize.pandas.validation.errors as err
def test_zero_errors():
errors = Validator.validate_types(**kwargs)
assert len(errors) == 0
# may need to revisit this case
def test_reject_all_nones():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([None])})
),
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeFeatures
def test_invalid_type_prediction_id():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{"prediction_id": pd.Series([datetime.datetime.now()])}
)
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_invalid_type_prediction_id_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_id": pd.Series([3.14])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_invalid_type_timestamp():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_timestamp": pd.Series(["now"])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_valid_timestamp_datetime():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{"prediction_timestamp": pd.Series([datetime.datetime.now()])}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_date():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[datetime.datetime.now().date()]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[datetime.datetime.now().timestamp()]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_int():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[int(datetime.datetime.now().timestamp())]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_invalid_type_features():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([list()])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeFeatures
def test_invalid_type_shap_values():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"a": pd.Series([list()])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeShapValues
def test_invalid_label():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Categorical([None])})
),
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_valid_label_int():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series([int(1)])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_str():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series(["0"])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series([3.14])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_bool():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label":
|
pd.Series([True])
|
pandas.Series
|
import enspp.bma as bma
import enspp.vis as vis
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from wrfpywind import data_preprocess as pp
import xarray as xr
# Read in the observational data
obs = pp.fmt_buoy_wspd(
data_path='/share/mzhang/jas983/wrf_data/oshwind/wrfpywind/wrfpywind/data/nyserda_buoy/',
south_dates_str='20190904_20210207', north_dates_str='20190812_20210207',
heights=[20, 40, 60, 80, 100, 120, 140, 160, 180, 200],
start_date='12-01-2019', end_date='12-31-2019')
# Specify the forecast initialization time
t_init = '2019-12-08'
# Fit the BMA model using two historical simulations
fit = bma.read_fmt_fit_bma(t_init, obs, n_days=2, datadir='../data/')
# Read in the ensemble data that you want to correct using BMA
t_init = pd.to_datetime(t_init)
t_end = t_init +
|
pd.DateOffset(days=4)
|
pandas.DateOffset
|
from collections import Counter
import sys
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.feature_extraction import DictVectorizer
import sklearn.cluster.k_means_
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
from sklearn.cluster import SpectralClustering, DBSCAN
from sklearn.decomposition import PCA, KernelPCA, SparsePCA, TruncatedSVD, IncrementalPCA
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np
from nltk.corpus import stopwords
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import csv
import pandas as pd
from pandas.plotting import scatter_matrix
def readAligedCorpus(words, path):
rval = [Counter() for i in range(len(words))]
stop_words = set(stopwords.words('german'))
tree = ET.parse(path)
root = tree.getroot()
body = root.find('body')
for tu in body.findall('tu'):
de = ''
en = ''
for tuv in tu.findall('tuv'):
atr = tuv.attrib
lang = atr.get('{http://www.w3.org/XML/1998/namespace}lang')
if lang == 'de':
for seg in tuv.findall('seg'):
de = seg.text.split()
if lang == 'en':
for seg in tuv.findall('seg'):
en = seg.text.lower()
en_words = en.split()
for i, word in enumerate(words):
if word in en_words:
counter = rval[i]
de = [token.lower() for token in de if token.isalpha() and not token in stop_words]
#whole aligned sentence as BOW
for de_w in de:
counter[de_w] += 1
return rval
def readFile(words, path):
with open(path, 'r', encoding='utf8') as f:
rval = []
stop_words = set(stopwords.words('english'))
rval = [Counter() for i in range(len(words))]
lines = f.readlines()
for line in lines:
tokens = line.split()
for i, word in enumerate(words):
if(word in tokens):
tokens = [token.lower() for token in tokens if token.isalpha() and not token in stop_words]
counter = rval[i]
idx = tokens.index(word)
#bow of 5 (2 on the left | 2 on the right)
bow = tokens[idx-2:idx+3]
#print(bow)
for w in bow:
counter[w] += 1
return rval
corpus = readFile(['apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'], 'resources/corpora/OpenSubtitles/small/combined2')
corpus_biling = readAligedCorpus(['apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'], 'resources/corpora/OpenSubtitles/very_small_parallel/vsmallaa')
#'apple', 'banana', 'oranges', 'watermelons', 'strawberries', 'grape', 'peach', 'cherry', 'pear', 'plum', 'melon', 'lemon', 'coconut', 'lime',
#'office', 'home', 'building', 'house', 'apartment', 'city', 'town', 'village'
#'shoes', 'shirt', 'pants', 'jacket', 'sweatshirt', 'socks'
#'car', 'plane', 'bicycle', 'motorcycle', 'scooter', 'bus', 'train'
#'new york', 'los angeles', 'chicago', 'houston', 'philadelphia', 'san antonio', 'san diego', 'dallas', 'san jose', 'austin', 'seattle'
#'wind', 'sun', 'water', 'fire'
#'chair', 'table', 'bed', 'closet', 'commode'
#'sister', 'brother', 'father', 'mother'
#'nose', 'eyes', 'mouth', 'face', 'hair'
vectorizer = DictVectorizer()
X = vectorizer.fit_transform(corpus).toarray()
vectorizer_biling = DictVectorizer()
X_biling = vectorizer_biling.fit_transform(corpus_biling).toarray()
X_combined = np.hstack((X, X_biling))
#sc = StandardScaler()
#X_std = sc.fit_transform(X)
#sc_biling = StandardScaler()
#X_biling_std = sc_biling = sc_biling.fit_transform(X_biling)
sc_combined = StandardScaler()
X_combined_std = sc_combined.fit_transform(X_combined)
#pca = PCA(n_components=2)
pca = KernelPCA(kernel='rbf')
#pca = SparsePCA()
#pca = TruncatedSVD()
#pca = IncrementalPCA()
#X_pca = pca.fit_transform(X_std)
#X_biling_pca = pca.fit_transform(X_biling_std)
X_combined_pca = pca.fit_transform(X_combined_std)
#kmeans = KMeans(n_clusters=2, init='random').fit(X_pca)
#f = kmeans.predict(X_pca)
#kmeans = KMeans(n_clusters=2, init='random').fit(X_biling_pca)
#f = kmeans.predict(X_biling_pca)
kmeans = KMeans(n_clusters=2, init='random').fit(X_combined_pca)
f = kmeans.predict(X_combined_pca)
print(f)
print('contains number one x times: ', list(f).count(1))
print('contains number zero x times: ', list(f).count(0))
#plot function from my warmup assignment
def plot(f):
arr = np.array(f)
if arr.shape[1] == 2:
x1 = arr[:, 0]
x2 = arr[:, 1]
plt.scatter(x1, x2)
plt.show()
elif arr.shape[1] == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = []
y = []
z = []
for line in f:
x.append(float(line[0]))
y.append(float(line[1]))
z.append(float(line[2]))
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
else:
m = np.array(f, dtype=float)
# first make some fake data with same layout as yours
data = pd.DataFrame(m)
# now plot using pandas
|
scatter_matrix(data, alpha=0.2, figsize=(6, 6), diagonal='kde')
|
pandas.plotting.scatter_matrix
|
"""
Static Analyzer qualification infrastructure.
This source file contains all the functionality related to benchmarking
the analyzer on a set of projects. Right now, this includes measuring
execution time and peak memory usage. The benchmark runs the analysis on every
project multiple times to get a better picture of the distribution
of measured values.
Additionally, this file includes a comparison routine for two benchmarking
results that plots the results together on one chart.
"""
import SATestUtils as utils
from SATestBuild import ProjectTester, stdout, TestInfo
from ProjectMap import ProjectInfo
import pandas as pd
from typing import List, Tuple
INDEX_COLUMN = "index"
def _save(data: pd.DataFrame, file_path: str):
data.to_csv(file_path, index_label=INDEX_COLUMN)
def _load(file_path: str) -> pd.DataFrame:
return
|
pd.read_csv(file_path, index_col=INDEX_COLUMN)
|
pandas.read_csv
|
""" This script simulates the input for the example section in the handout.
This file creates the following files, which are then used to create the plots for the
handout.
"""
from itertools import product
import pandas as pd
import numpy as np
import respy as rp
NUM_POINTS = 10
IS_DEBUG = True
def mechanism_wrapper(simulate, params, label, change):
policy_params = params.copy()
if label == "delta":
policy_params.loc[("delta", "delta"), "value"] = change
elif label == "subsidy":
policy_params.loc[("nonpec_school", "hs_graduate"), "value"] += change
else:
raise NotImplementedError
policy_df = simulate(policy_params)
return policy_df.groupby("Identifier")["Experience_School"].max().mean()
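# Minimal usage sketch (illustrative only; simulate_func and params are the objects
# constructed further below via rp.get_example_model and rp.get_simulate_func):
# avg_schooling = mechanism_wrapper(simulate_func, params, "subsidy", 2_000)
# would report average final schooling under a hypothetical 2,000 unit tuition subsidy.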
def calc_choice_frequencies(df):
"""Compute choice frequencies."""
df_choice = df.groupby("Period").Choice.value_counts(normalize=True).unstack()
index = list(product(["probs"], df_choice.columns))
df_choice.columns = pd.MultiIndex.from_tuples(index)
return df_choice
def calc_wage_distribution_occupation(df):
"""Compute choice frequencies."""
df_occ = df.groupby(["Period", "Choice"])["Wage"].describe()[["mean", "std"]]
cond = df_occ.index.get_level_values("Choice").isin(["school", "home"])
df_occ = df_occ[~cond]
df_occ = df_occ.unstack()
return df_occ
def calc_wage_distribution_overall(df):
"""Compute choice frequencies."""
df_ove = df.groupby(["Period"])["Wage"].describe()[["mean", "std"]]
df_ove["Choice"] = "all"
df_ove.set_index(["Choice"], append=True, inplace=True)
df_ove = df_ove.reorder_levels(["Period", "Choice"])
df_ove = df_ove.unstack()
return df_ove
params, options, df_emp = rp.get_example_model("kw_97_extended_respy")
# We want to reduce the computational burden for debugging purposes and our continuous
# integration pipeline.
if IS_DEBUG:
options["n_periods"] = 12
simulate_func = rp.get_simulate_func(params, options)
df_sim = simulate_func(params)
df_descriptives = None
for label, df in [("empirical", df_emp), ("simulated", df_sim)]:
df_occ = calc_wage_distribution_occupation(df)
df_ove = calc_wage_distribution_overall(df)
df_choice = calc_choice_frequencies(df)
df_all = pd.concat([df_choice, df_occ, df_ove], axis=1)
df_all["Data"] = label
df_all.set_index(["Data"], append=True, inplace=True)
df_all = df_all.reorder_levels(["Data", "Period"])
df_descriptives = pd.concat([df_descriptives, df_all])
df_descriptives.index = df_descriptives.index.sort_values()
df_descriptives.to_pickle("data-descriptives.pkl")
# We evaluate the effect of a change in time preferences and a tuition subsidy.
subsidies = np.linspace(0, 4000, num=NUM_POINTS, dtype=int, endpoint=True)
deltas = np.linspace(0.910, 0.960, NUM_POINTS)
columns = ["level"]
index = list(product(["delta"], deltas)) + list(product(["subsidy"], subsidies))
index = pd.MultiIndex.from_tuples(index, names=["Experiment", "Change"])
df_mechanisms =
|
pd.DataFrame(columns=columns, index=index)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from evalml.pipelines.components import TargetImputer
def test_target_imputer_no_y(X_y_binary):
X, y = X_y_binary
imputer = TargetImputer()
assert imputer.fit_transform(None, None) == (None, None)
imputer = TargetImputer()
imputer.fit(None, None)
assert imputer.transform(None, None) == (None, None)
def test_target_imputer_with_X():
X = pd.DataFrame({"some col": [1, 3, np.nan]})
y = pd.Series([np.nan, 1, 3])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([2, 1, 3])
X_expected = pd.DataFrame({"some col": [1, 3, np.nan]})
X_t, y_t = imputer.fit_transform(X, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
assert_frame_equal(X_expected, X_t, check_dtype=False)
def test_target_imputer_median():
y = pd.Series([np.nan, 1, 10, 10, 6])
imputer = TargetImputer(impute_strategy="median")
y_expected = pd.Series([8, 1, 10, 10, 6])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_mean():
y = pd.Series([np.nan, 2, 0])
imputer = TargetImputer(impute_strategy="mean")
y_expected = pd.Series([1, 2, 0])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"fill_value, y, y_expected",
[
(None, pd.Series([np.nan, 0, 5]), pd.Series([0, 0, 5])),
(
None,
pd.Series([np.nan, "a", "b"]),
pd.Series(["missing_value", "a", "b"]).astype("category"),
),
(3, pd.Series([np.nan, 0, 5]), pd.Series([3, 0, 5])),
(3, pd.Series([np.nan, "a", "b"]), pd.Series([3, "a", "b"]).astype("category")),
],
)
def test_target_imputer_constant(fill_value, y, y_expected):
imputer = TargetImputer(impute_strategy="constant", fill_value=fill_value)
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_most_frequent():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series(["a", "a", "b"]).astype("category")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
y = pd.Series([np.nan, 1, 1, 2])
imputer = TargetImputer(impute_strategy="most_frequent")
y_expected = pd.Series([1, 1, 1, 2])
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
def test_target_imputer_col_with_non_numeric_with_numeric_strategy():
y = pd.Series([np.nan, "a", "b"])
imputer = TargetImputer(impute_strategy="mean")
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use mean strategy with non-numeric data"
):
imputer.fit(None, y)
imputer = TargetImputer(impute_strategy="median")
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit_transform(None, y)
with pytest.raises(
ValueError, match="Cannot use median strategy with non-numeric data"
):
imputer.fit(None, y)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_all_bool_return_original(data_type, make_data_type):
y = pd.Series([True, True, False, True, True], dtype=bool)
y = make_data_type(data_type, y)
y_expected = pd.Series([True, True, False, True, True], dtype=bool)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_target_imputer_boolean_dtype(data_type, make_data_type):
y = pd.Series([True, np.nan, False, np.nan, True], dtype="category")
y_expected = pd.Series([True, True, False, True, True], dtype="category")
y = make_data_type(data_type, y)
imputer = TargetImputer()
imputer.fit(None, y)
_, y_t = imputer.transform(None, y)
assert_series_equal(y_expected, y_t)
def test_target_imputer_fit_transform_all_nan_empty():
y = pd.Series([np.nan, np.nan])
imputer = TargetImputer()
imputer.fit(None, y)
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.transform(None, y)
imputer = TargetImputer()
with pytest.raises(RuntimeError, match="Transformed data is empty"):
imputer.fit_transform(None, y)
def test_target_imputer_numpy_input():
y = np.array([np.nan, 0, 2])
imputer = TargetImputer(impute_strategy="mean")
y_expected = np.array([1, 0, 2])
_, y_t = imputer.fit_transform(None, y)
assert np.allclose(y_expected, y_t)
np.testing.assert_almost_equal(y, np.array([np.nan, 0, 2]))
def test_target_imputer_does_not_reset_index():
y = pd.Series(np.arange(10))
y[5] = np.nan
assert y.index.tolist() == list(range(10))
y.drop(0, inplace=True)
pd.testing.assert_series_equal(
pd.Series(
[1, 2, 3, 4, np.nan, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))
),
y,
)
imputer = TargetImputer(impute_strategy="mean")
imputer.fit(None, y=y)
_, y_t = imputer.transform(None, y)
pd.testing.assert_series_equal(
pd.Series([1.0, 2, 3, 4, 5, 6, 7, 8, 9], dtype=float, index=list(range(1, 10))),
y_t,
)
@pytest.mark.parametrize(
"y, y_expected",
[
(pd.Series([1, 0, 5, None]), pd.Series([1, 0, 5, 2])),
(pd.Series([0.1, 0.0, 0.5, None]), pd.Series([0.1, 0.0, 0.5, 0.2])),
],
)
def test_target_imputer_with_none(y, y_expected):
imputer = TargetImputer(impute_strategy="mean")
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"y, y_expected",
[
(
pd.Series(["b", "a", "a", None], dtype="category"),
pd.Series(["b", "a", "a", "a"], dtype="category"),
),
(
pd.Series([True, None, False, True], dtype="category"),
pd.Series([True, True, False, True], dtype="category"),
),
(
pd.Series(["b", "a", "a", None]),
pd.Series(["b", "a", "a", "a"], dtype="category"),
),
],
)
def test_target_imputer_with_none_non_numeric(y, y_expected):
imputer = TargetImputer()
_, y_t = imputer.fit_transform(None, y)
assert_series_equal(y_expected, y_t, check_dtype=False)
@pytest.mark.parametrize(
"y_pd",
[
pd.Series([1, 2, 3], dtype="int64"),
pd.Series([1.0, 2.0, 3.0], dtype="float"),
pd.Series(["a", "b", "a"], dtype="category"),
|
pd.Series([True, False, True], dtype=bool)
|
pandas.Series
|
import csv
import os
import pandas
import sys
from pandas import DataFrame, Series
from datetime import datetime, timedelta, date
players_csv = '../../report/dataset/player_overviews_unindexed_csv.csv'
matches_csv = ['../../report/dataset/match_scores_1968-1990_unindexed_csv.csv',
'../../report/dataset/match_scores_1991-2016_unindexed_csv.csv',
'../../report/dataset/match_scores_2017_unindexed_csv.csv']
tourneys_csv = '../../report/dataset/tournaments_1877-2017_unindexed_csv.csv'
def k_factor(matches_played):
# This function returns the calculated k factor based on the matches played and the selected values
# for the constants k, offset and shape. The parameter values are set based on FiveThirtyEight's suggestion:
# https://fivethirtyeight.com/features/serena-williams-and-the-difference-between-all-time-great-and-greatest-of-all-time
k = 250
offset = 5
shape = 0.4
return k/(matches_played + offset)**shape
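# Illustrative values (hypothetical match counts): with k=250, offset=5 and shape=0.4 the
# factor decays from roughly 131 for a debutant to roughly 39 after 100 matches,
# so early results move a rating much more than later ones.
# k_factor(0)    -> 250 / 5**0.4   ~ 131
# k_factor(100)  -> 250 / 105**0.4 ~ 39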
def calc_expected_score(playerA_rating, playerB_rating):
# This function calculates the probability for playerA to win the match against playerB
# based on the elo modelling from this sample: https://www.betfair.com.au/hub/tennis-elo-modelling
exp_score = 1/(1+(10**((playerB_rating - playerA_rating)/400)))
return exp_score
def update_elo(old_elo, k, is_winner, expected_score):
# This function updates the elo rating based on the previous value and the calculated
# k factor and expected score.
new_elo = old_elo + k * ((1 if is_winner else 0) - expected_score)
return new_elo
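# Worked example with hypothetical ratings: a 1600-rated player meets a 1500-rated player.
# calc_expected_score(1600, 1500) = 1 / (1 + 10**((1500 - 1600) / 400)) ~ 0.64, so a win
# only adds about 0.36 * k to the higher rating, while a loss would cost about 0.64 * k:
# new_rating = update_elo(1600, k_factor(10), True, calc_expected_score(1600, 1500))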
def read_players(csv_file):
with open(csv_file) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
# player_id, last_name, first_name, flag_code, turned_pro
cols_index = [0, 2, 3, 5]
all_players = []
for row in read_csv:
player_info = [""]
for col_index in cols_index:
player_info.append(row[col_index])
all_players.append(player_info)
headers = ['week_title', 'player_id',
'last_name', 'first_name', 'flag_code']
players = DataFrame(all_players, columns=headers)
players['current_elo'] = Series(1500, index=players.index)
players['last_tourney_date'] = Series('N/A', index=players.index)
players['matches_played'] = Series(0, index=players.index)
players['peak_elo'] = Series(1500, index=players.index)
players['peak_elo_date'] = Series('N/A', index=players.index)
players['retirement_year'] = Series('N/A', index=players.index)
players['turned_pro'] =
|
Series('N/A', index=players.index)
|
pandas.Series
|
# Build default model and do permutation feature importance (PFI)
import warnings
import pandas as pd
import numpy as np
from sklearn import ensemble, model_selection, metrics, inspection
from skopt import BayesSearchCV, space
import shap
import load_data
import misc_util
RANDOM_SEED = 11798
# A very repetitive BayesSearchCV warning I'd like to ignore
warnings.filterwarnings('ignore', message='The objective has been evaluated at this point before.')
print('Loading labels from original data')
label_map = {p: pdf.label.iloc[0] for p, pdf in load_data.train_full().groupby('STUDENTID')}
# Set up model training parameters
m = ensemble.ExtraTreesClassifier(500, bootstrap=True, random_state=RANDOM_SEED)
bayes_grid = {
'max_features': space.Real(.001, 1),
'max_samples': space.Real(.001, .999), # For bootstrapping
'ccp_alpha': space.Real(0, .004), # Range determined via ccp_alpha_explore.py
}
xval = model_selection.StratifiedKFold(4, shuffle=True, random_state=RANDOM_SEED)
scoring = metrics.make_scorer(misc_util.adjusted_thresh_kappa, needs_proba=True)
# Getting BayesSearchCV to work requires modifying site-packages/skopt/searchcv.py per:
# https://github.com/scikit-optimize/scikit-optimize/issues/762
gs = BayesSearchCV(m, bayes_grid, n_iter=100, n_jobs=3, cv=xval, verbose=0, scoring=scoring,
random_state=RANDOM_SEED, optimizer_kwargs={'n_initial_points': 20})
# Build model for 30m data as an example
train_result = []
print('Loading data')
feat_names = list(pd.read_csv('features_fe/filtered_features_30m.csv').feature)
train_df = pd.read_csv('features_fe/train_30m.csv')[['STUDENTID'] + feat_names]
holdout_df = pd.read_csv('features_fe/holdout_30m.csv')[['STUDENTID'] + feat_names]
for fset in ['features_tsfresh', 'features_featuretools']:
feat_names = list(
|
pd.read_csv(fset + '/filtered_features_30m.csv')
|
pandas.read_csv
|
import numpy as np
import glob
import logging
import os
from time import time
import SimpleITK as sitk
import numpy as np
import pandas as pd
import yaml
from sklearn.model_selection import KFold
from src.utils.Utils_io import ensure_dir
from src.visualization.Visualize import plot_value_histogram
def copy_meta_and_save(new_image, reference_sitk_img, full_filename=None, override_spacing=None, copy_direction=True):
"""
Copy metadata, UID and structural information from one image to another
Works also for different dimensions, returns new_image with copied structural info
:param new_image: sitk.Image
:param reference_sitk_img: sitk.Image
:param full_filename: full file path as str
:return:
"""
t1 = time()
try:
# make sure this method works with nda and sitk images
if isinstance(new_image, np.ndarray):
if len(new_image.shape) == 4:
# 4D needs to be built from a series
new_image = [sitk.GetImageFromArray(img) for img in new_image]
new_image = sitk.JoinSeries(new_image)
else:
new_image = sitk.GetImageFromArray(new_image)
ensure_dir(os.path.dirname(os.path.abspath(full_filename)))
if reference_sitk_img is not None:
assert (isinstance(reference_sitk_img, sitk.Image)), 'no reference image given'
assert (isinstance(new_image, sitk.Image)), 'only np.ndarrays and sitk images could be stored'
# copy metadata
for key in reference_sitk_img.GetMetaDataKeys():
new_image.SetMetaData(key, get_metadata_maybe(reference_sitk_img, key))
logging.debug('Metadata_copied: {:0.3f}s'.format(time() - t1))
# copy structural information to an image with the same dimension and size
if (reference_sitk_img.GetDimension() == new_image.GetDimension()) and (
reference_sitk_img.GetSize() == new_image.GetSize()):
new_image.CopyInformation(reference_sitk_img)
# same dimension (e.g. 4) but different size per dimension
elif (reference_sitk_img.GetDimension() == new_image.GetDimension()):
# copy spacing, origin and rotation but keep size as it is
if copy_direction:
new_image.SetDirection(reference_sitk_img.GetDirection())
new_image.SetOrigin(reference_sitk_img.GetOrigin())
new_image.SetSpacing(reference_sitk_img.GetSpacing())
# copy structural information to smaller images e.g. 4D to 3D
elif reference_sitk_img.GetDimension() > new_image.GetDimension():
shape_ = len(new_image.GetSize())
reference_shape = len(reference_sitk_img.GetSize())
# copy direction to smaller images
# 1. extract the direction, 2. create a matrix, 3. slice by the new shape, 4. flatten
if copy_direction:
direction = np.array(reference_sitk_img.GetDirection())
dir_ = direction.reshape(reference_shape, reference_shape)
direction = dir_[:shape_, :shape_].flatten()
new_image.SetDirection(direction)
new_image.SetOrigin(reference_sitk_img.GetOrigin()[:shape_])
new_image.SetSpacing(reference_sitk_img.GetSpacing()[:shape_])
# copy structural information to bigger images e.g. 3D to 4D, fill with 1.0 spacing
else:
ones = [1.0] * (new_image.GetDimension() - reference_sitk_img.GetDimension())
new_image.SetOrigin((*reference_sitk_img.GetOrigin(), *ones))
new_image.SetSpacing((*reference_sitk_img.GetSpacing(), *ones))
# we can't copy the direction from smaller images to bigger ones
logging.debug('spatial data_copied: {:0.3f}s'.format(time() - t1))
if override_spacing:
new_image.SetSpacing(override_spacing)
if full_filename != None:
# copy uid
writer = sitk.ImageFileWriter()
# writer.KeepOriginalImageUIDOn()
writer.SetFileName(full_filename)
writer.Execute(new_image)
logging.debug('image saved: {:0.3f}s'.format(time() - t1))
return True
except Exception as e:
logging.error('Error with saving file: {} - {}'.format(full_filename, str(e)))
return False
else:
return new_image
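# Minimal usage sketch (illustrative; the file names below are placeholders): write a new
# array to disk while reusing the spatial metadata of a reference image.
# reference = sitk.ReadImage('some_reference_img.nrrd')
# new_nda = np.zeros_like(sitk.GetArrayFromImage(reference))
# copy_meta_and_save(new_nda, reference, full_filename='derived_msk.nrrd')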
def create_4d_volumes_from_4d_files(img_f, mask_f, full_path='data/raw/GCN/3D/', slice_threshold=2):
"""
Expects a 4D image and mask file name and a target path
filter mask and image volumes by contoured time-steps
copy all metadata
save them to the destination path
:param img_f: 4D image filepath as str
:param mask_f: 4D mask filepath as str
:param full_path: export path as str
:param slice_threshold: minimal masks per timestep as int
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_threshold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
# write filtered 4d image to disk
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
img_file = '{}_{}{}'.format(patient_name, 'img', '.nrrd')
mask_file = '{}_{}{}'.format(patient_name, 'msk', '.nrrd')
copy_meta_and_save(img_4d_nda, img_4d_sitk, os.path.join(full_path, img_file))
copy_meta_and_save(mask_4d_nda, img_4d_sitk, os.path.join(full_path, mask_file))
return [masked_t, list(img_4d_nda.shape)]
def extract_spacing(matlabfile=None, is_sax=True):
"""
extract the spacing from a medvisio export matlabfile
of a CMR image, either SAX or 4CH
returns (6, 1, 1, 1) for (z, t, x, y) if no spacing could be found
:param matlabfile (np.ndarray) matlabfile opened via scipy.io.loadmat(example.mat)
:param is_sax (bool) toggle between sax or 4ch spacing
:return: (tuple) spacing in the following order (z,t,x,y)
"""
assert (matlabfile is not None), 'no matlab file given, please provide *.mat file as np.ndarray'
try:
values = dict([(keys.lower(), value) for keys, value in
zip(matlabfile['setstruct'][0].dtype.names, matlabfile['setstruct'][0][int(is_sax)])])
except Exception as e:
print(str(e))
values = dict()
spacing_x = float(values.get('resolutionx', 1))
spacing_y = float(values.get('resolutiony', 1))
spacing_t = float(1)
spacing_z = float(values.get('slicethickness', 6))
return (spacing_z, spacing_t, spacing_x, spacing_y)
def create_3d_volumes_from_4d_files(img_f, mask_f, full_path='data/raw/tetra/3D/', slice_treshhold=2):
"""
Expects a 4D image and mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param full_path:
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_treshhold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
# create t 3d volumes
for img_3d, mask_3d, t in zip(img_4d_nda, mask_4d_nda, masked_t):
# write 3d image
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
img_file = '{}_t{}_{}{}'.format(patient_name, str(t), 'img', '.nrrd')
mask_file = '{}_t{}_{}{}'.format(patient_name, str(t), 'msk', '.nrrd')
copy_meta_and_save(img_3d, img_4d_sitk, os.path.join(full_path, img_file))
copy_meta_and_save(mask_3d, img_4d_sitk, os.path.join(full_path, mask_file))
return [masked_t, list(img_4d_nda.shape)]
def create_2d_slices_from_4d_volume_files(img_f, mask_f, export_path, filter_by_mask=True, slice_threshold=2):
"""
Expects a 4D image and mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path: str
:param filter_by_mask: bool
:param slice_threshold: int
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
if filter_by_mask:
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_threshold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
else:
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)
masked_t = list(range(img_4d_nda.shape[0]))
mask_4d_nda = sitk.GetArrayFromImage(mask_4d_sitk)
# create t x 3d volumes
for img_3d, mask_3d, t in zip(img_4d_nda, mask_4d_nda, masked_t):
# get patient_name
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
img_file = '{}_t{}_z{}_{}{}'.format(patient_name, str(t), str(z), 'img', '.nrrd')
mask_file = '{}_t{}_z{}_{}{}'.format(patient_name, str(t), str(z), 'msk', '.nrrd')
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_4d_sitk, os.path.join(export_path, img_file), copy_direction=False)
copy_meta_and_save(slice_2d[1], img_4d_sitk, os.path.join(export_path, mask_file), copy_direction=False)
return [masked_t, list(img_4d_nda.shape)]
def create_2d_slices_from_3d_volume_files_any_filename(img_f, mask_f, export_path):
"""
Helper to split a GCN 3D dicom file into z x 2D slices
Expects a 3D image and mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path:
:return:
"""
import re
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_3d_sitk = sitk.ReadImage(mask_f)
img_3d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_3d = sitk.GetArrayFromImage(mask_3d_sitk)
img_3d = sitk.GetArrayFromImage(img_3d_sitk)
# get file names
_, img_f = os.path.split(img_f)
_, mask_f = os.path.split(mask_f)
def get_new_name(f_name, z):
match = ''
# check if image or mask
m = re.search('_img|_msk', f_name)
if m:
match = m.group(0)
# extend filename at the very last position before 'img' or 'msk'
return re.sub('{}.nrrd'.format(match), '_{}{}.nrrd'.format(z, match), f_name)
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
# extend the filename
img_file = get_new_name(img_f, z)
mask_file = get_new_name(mask_f, z)
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_3d_sitk, os.path.join(export_path, img_file))
copy_meta_and_save(slice_2d[1], img_3d_sitk, os.path.join(export_path, mask_file))
return list(img_3d.shape)
def create_2d_slices_from_3d_volume_files(img_f, mask_f, export_path):
"""
Helper for ACDC data
Expects a 3D image and mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path:
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_3d_sitk = sitk.ReadImage(mask_f)
img_3d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_3d = sitk.GetArrayFromImage(mask_3d_sitk)
img_3d = sitk.GetArrayFromImage(img_3d_sitk)
# get patient_name
patient_name = os.path.basename(img_f).split('_')[0]
frame = os.path.basename(img_f).split('frame')[1][:2]
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
img_file = '{}__t{}_z{}_{}{}'.format(patient_name, str(frame), str(z), 'img', '.nrrd')
mask_file = '{}__t{}_z{}_{}{}'.format(patient_name, str(frame), str(z), 'msk', '.nrrd')
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_3d_sitk, os.path.join(export_path, img_file))
copy_meta_and_save(slice_2d[1], img_3d_sitk, os.path.join(export_path, mask_file))
return [frame, list(img_3d.shape)]
def get_patient(filename_to_2d_nrrd_file):
"""
Split the nrrd filename and return the patient id.
By default the filename is split by '_' and the first two elements are joined and returned;
if the filename contains '__', the part before it is returned.
"""
import re
m = re.search('__', filename_to_2d_nrrd_file)
if m: # nrrd filename with '__'
return os.path.basename(filename_to_2d_nrrd_file).split('__')[0]
if os.path.basename(filename_to_2d_nrrd_file).startswith('patient'): # acdc file
return os.path.basename(filename_to_2d_nrrd_file).split('_')[0]
else: # gcn filename
return '_'.join(os.path.basename(filename_to_2d_nrrd_file).split('_')[:2])
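# Illustrative behaviour on hypothetical file names (these names are examples only and
# do not come from the datasets referenced in this module):
assert get_patient('pat42__t10_z3_img.nrrd') == 'pat42'                 # name containing '__'
assert get_patient('patient001_frame01_z2_img.nrrd') == 'patient001'    # ACDC-style name
assert get_patient('gcn_0815_t4_z1_img.nrrd') == 'gcn_0815'             # generic GCN name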
def get_trainings_files(data_path, fold=0, path_to_folds_df='data/raw/gcn_05_2020_ax_sax_86/folds.csv'):
"""
Load training and test files of a directory based on a given folds dataframe
:param data_path:
:param fold:
:param path_to_folds_df:
:return: x_train, y_train, x_val, y_val
"""
img_suffix = '*img.nrrd'
mask_suffix = '*msk.nrrd'
# load the nrrd files with given pattern from the data path
x = sorted(glob.glob(os.path.join(data_path, img_suffix)))
y = sorted(glob.glob(os.path.join(data_path, mask_suffix)))
if len(x) == 0:
logging.info('no files found, try to load with acdc file pattern')
x, y = load_acdc_files(data_path)
df = pd.read_csv(path_to_folds_df)
patients = df[df.fold.isin([fold])]
# make sure we count each patient only once
patients_train = patients[patients['modality'] == 'train']['patient'].unique()
patients_test = patients[patients['modality'] == 'test']['patient'].unique()
logging.info('Found {} images/masks in {}'.format(len(x), data_path))
logging.info('Patients train: {}'.format(len(patients_train)))
def filter_files_for_fold(list_of_filenames, list_of_patients):
"""Helper to filter one list by a list of substrings"""
from src.data.Dataset import get_patient
return [str for str in list_of_filenames
if get_patient(str) in list_of_patients]
x_train = sorted(filter_files_for_fold(x, patients_train))
y_train = sorted(filter_files_for_fold(y, patients_train))
x_test = sorted(filter_files_for_fold(x, patients_test))
y_test = sorted(filter_files_for_fold(y, patients_test))
assert (len(x_train) == len(y_train)), 'len(x_train != len(y_train))'
logging.info('Selected {} of {} files with {} of {} patients for training fold {}'.format(len(x_train), len(x),
len(patients_train),
len(df.patient.unique()),
fold))
return x_train, y_train, x_test, y_test
def get_kfolded_data(kfolds=4, path_to_data='data/raw/tetra/2D/', extract_patient_id=get_patient):
"""
filter all image files by patient names defined in fold n
functions expects subfolders, collects all image, mask files
and creates a list of unique patient ids
create k folds of this patient ids
filter the filenames containing the patient ids from each kfold split
returns
:param kfolds: number of splits
:param path_to_data: path to root of split data e.g. 'data/raw/tetra/2D/'
:param extract_patient_id: function which returns the patient id for each filename in path_to_data
:return: a dataframe with the following columns:
fold (kfolds-1),
x_path (full filename to image as nrrd),
y_path (full filename to mask as nrrd),
modality(train or test)
patient (patient id)
"""
img_pattern = '*img.nrrd'
columns = ['fold', 'x_path', 'y_path', 'modality', 'patient']
modality_train = 'train'
modality_test = 'test'
seed = 42
# get all images, masks from given directory
# get all img files in all subdirs
x = sorted(glob.glob(os.path.join(path_to_data, '**/*{}'.format(img_pattern))))
# if no subdirs given, search in root
if len(x) == 0:
x = sorted(glob.glob(os.path.join(path_to_data, '*{}'.format(img_pattern))))
logging.info('found: {} files'.format(len(x)))
# create a unique list of patient ids
patients = sorted(list(set([extract_patient_id(f) for f in x])))
logging.info('found: {} patients'.format(len(patients)))
# create a k-fold instance with k = kfolds
kfold = KFold(n_splits=kfolds, shuffle=True, random_state=seed)
def filter_x_by_patient_ids_(x, patient_ids, modality, columns, f):
# create a dataframe from x (list of filenames) filter by patient ids
# returns a dataframe
df = pd.DataFrame(columns=columns)
df['x_path'] = [elem for elem in x if extract_patient_id(elem) in patient_ids]
df['y_path'] = [elem.replace('img', 'msk') for elem in df['x_path']]
df['fold'] = [f] * len(df['x_path'])
df['modality'] = [modality] * len(df['x_path'])
df['patient'] = [extract_patient_id(elem) for elem in df['x_path']]
logging.debug(len(df['x_path']))
logging.debug(patient_ids)
logging.debug(len(x))
logging.debug(extract_patient_id(x[0]))
return df
# split patients k times
# use the indexes to get the patient ids from x
# use the patient ids to filter train/test from the complete list of files
df_folds = pd.DataFrame(columns=columns)
for f, idx in enumerate(
kfold.split(patients)): # f = fold, idx = tuple with all indexes to split the patients in this fold
train_idx, test_idx = idx
# create a list of train and test indexes
logging.debug("TRAIN: {}, TEST: {}".format(train_idx, test_idx))
# slice the filenames by the given indexes
patients_train, patients_test = [patients[i] for i in train_idx], [patients[i] for i in test_idx]
df_train = filter_x_by_patient_ids_(x, patients_train, modality_train, columns, f)
df_test = filter_x_by_patient_ids_(x, patients_test, modality_test, columns, f)
# merge train and test
df_fold = pd.concat([df_train, df_test])
# merge fold into folds dataset
df_folds = pd.concat([df_fold, df_folds])
return df_folds
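# Minimal usage sketch (illustrative; the path is the default from the signature above):
# df_folds = get_kfolded_data(kfolds=4, path_to_data='data/raw/tetra/2D/')
# fold0_train = df_folds[(df_folds.fold == 0) & (df_folds.modality == 'train')]
# x_train, y_train = sorted(fold0_train.x_path), sorted(fold0_train.y_path)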
def filter_x_by_patient_ids(x, patient_ids, modality='test',
columns=['x_path', 'y_path', 'fold', 'modality', 'patient', 'pathology'], fold=0,
pathology=None, filter=True):
"""
Create a df from a given list of files
and a list of patients which is used to filter the file names
:param x:
:param patient_ids:
:param modality:
:param columns:
:param fold:
:param pathology:
:return:
"""
# create a dataframe from x (list of filenames) filter by patient ids
# returns a dataframe
df = pd.DataFrame(columns=columns)
if filter:
df['x_path'] = [elem for elem in x if get_patient(elem) in patient_ids]
else:
df['x_path'] = [elem for elem in x]
df['y_path'] = [elem.replace('img', 'msk') for elem in df['x_path']]
df['fold'] = [fold] * len(df['x_path'])
df['modality'] = [modality] * len(df['x_path'])
df['patient'] = [get_patient(elem) for elem in df['x_path']]
df['pathology'] = [pathology] * len(df['x_path'])
return df
def get_n_patients(df, n=1):
"""
Select n random patients
Filter the data frame by these patients
Use the Fold 0 split as default
Override the modality for all randomly selected patients to "train"
return filtered df
:param df:
:param n:
:param fold:
:return:
"""
# fold is not important,
# because we return patients from train and test modality
fold = 0
# make random.choice idempotent
np.random.seed(42)
# select random patients
patients = np.random.choice(sorted(df['patient'].unique()), size=n)
logging.info('Added patients: {} from the GCN dataset'.format(patients))
# filter data frame by fold and by random selected patients ids, make sure to make a copy to avoid side effects
df_temp = df[(df['fold'] == fold) & (df['patient'].isin(patients))].copy()
# make sure all selected images will be used during training, change modality to train for this images
# train_kfolded will only use images with modality == train, override the modality for all selected patients/rows
df_temp.loc[:, 'modality'] = 'train'
df_temp.reset_index(inplace=True)
return df_temp
def get_train_data_from_df(first_df='reports/kfolds_data/2D/acdc/df_kfold.csv', second_df=None,
n_second_df=0, n_first_df=None, fold=0, ):
"""
load one df and select n patients, default: use all
load a second df, if given
select n patients from second df,
merge first df into second df
return x_train, y_train, x_val, y_val as list of files
:param first_df: full file/pathname to the first df (str)
:param second_df: full file/pathname to the second df (str)
:param n_second_df: number of patients from the second df that should be merged
:param n_first_df: int - number of patients to load from the first dataframe
:param fold: select a fold from df
:return:
"""
extend = dict()
extend['GCN_PATIENTS'] = list()
extend['GCN_IMAGES'] = 0
df = pd.read_csv(first_df)
# take only n patients from the first dataframe
if n_first_df:
df = get_n_patients(df, n_first_df)
# if second dataframe given, load df, select m patients, and concat this dataframe with the first one
if second_df: # extend dataframe with n patients from second dataframe
df_second = pd.read_csv(second_df)
df_second = get_n_patients(df_second, n_second_df)
df = pd.concat([df, df_second], sort=False)
extend['GCN_PATIENTS'] = sorted(df_second['patient'].unique())
extend['GCN_IMAGES'] = len(df_second)
logging.info('loaded df from {} with shape: {}'.format(first_df, df.shape))
logging.info('available folds: {}, selected fold: {}'.format(df.fold.unique(), fold))
if 'fold' in df:
df = df[df['fold'] == fold]
if 'pathology' in df:
logging.info(
'available modalities: {}\n available pathologies: {}'.format(df.modality.unique(), df.pathology.unique()))
# get a trainings and a test dataframe
df_train = df[df['modality'] == 'train']
df_test = df[df['modality'] == 'test']
return sorted(df_train['x_path'].values), sorted(df_train['y_path'].values), sorted(
df_test['x_path'].values), sorted(df_test[
'y_path'].values), extend
def create_acdc_dataframe_for_cv(path_to_data='data/raw/ACDC/2D/', kfolds=4,
original_acdc_dir='data/raw/ACDC/original/all/', img_pattern='*img.nrrd'):
"""
Creates a dataframe of all 2D ACDC cmr filenames,
splits the data in n folds with respect to the pathologies
0. Load all 2D nrrd files in the given directory
1. Create a dataframe with all original ACDC files, patient ids, pathology
2. Create a list of all pathologies
3. Create n (# of pathologies) subgroups of patients, filtered by pathology
4. Create k splits (# of splits) of each patient subgroup
5. For each pathology split: collect all 2D files
6. Merge 2D files according to their split number and modality (train/val)
return a df with all files for each split, with the columns:
fold, x_path, y_path, modality, patient, pathology
:param img_pattern:
:param path_to_data: path to 2D ACDC files
:param kfolds:
:return: dataframe with all splits
"""
from sklearn.model_selection import KFold
seed = 42
# img_pattern = '*img.nrrd'
columns = ['fold', 'x_path', 'y_path', 'modality', 'patient', 'pathology']
modality_train = 'train'
modality_test = 'test'
# list all nrrd image files within the subdirs of path_to_data
acdc_x_files = sorted(glob.glob(os.path.join(path_to_data, '**/{}'.format(img_pattern))))
logging.info('Found: {} files in {}'.format(len(acdc_x_files), path_to_data))
# get all ACDC files + pathology as df
# provide a seed to make shuffle idempotent
df = get_acdc_dataset_as_df(original_acdc_dir)
logging.info('Created a dataframe with shape: {}'.format(df.shape))
pathologies = df['pathology'].unique()
kfold = KFold(kfolds, shuffle=True, random_state=seed)
# create a df to merge all splits into
df_folds = pd.DataFrame(columns=columns)
# for each pathology, create k folds of train/test splits
for pathology in pathologies:
# collect all patients with this pathology
patients = df[df['pathology'] == pathology]['patient'].unique()
logging.debug('{} Patients found for pathology: {}'.format(len(patients), pathology))
# create k fold of train/test splits
# split with the patient ids
# to make sure that one patient occurs either as train or validation data
# f = fold, idx = tuple with all indexes to split the patients in this fold
# kfold.split returns the patient indexes for each fold
for fold, idx in enumerate(kfold.split(patients)):
train_idx, test_idx = idx
# logging.debug("TRAIN:", train_idx, "TEST:", test_idx)
# create one list for the train and test patient ids from the kfold split indexes
patients_train, patients_test = [patients[i] for i in train_idx], [patients[i] for i in test_idx]
logging.debug('Fold: {}, Pathology: {} train: {}'.format(fold, pathology, patients_train))
logging.debug('Fold: {}, Pathology: {}, test: {}'.format(fold, pathology, patients_test))
# filter the 2D filenames by the two patient id lists (train/test) for this fold
# create one df for each split (train/test) with the corresponding 2D nrrd files
df_train = filter_x_by_patient_ids(acdc_x_files, patients_train, modality_train, columns, fold, pathology)
df_test = filter_x_by_patient_ids(acdc_x_files, patients_test, modality_test, columns, fold, pathology)
logging.debug('Files x_train: {}'.format(len(df_train)))
logging.debug('Files x_test: {}'.format(len(df_test)))
# merge train and test files of this split
df_fold = pd.concat([df_train, df_test])
# merge this fold into folds dataset
df_folds =
|
pd.concat([df_fold, df_folds], sort=True)
|
pandas.concat
|
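The row above targets pandas.concat twice: once to stack a fold's train and test frames, and once to append the fold to the running df_folds accumulator. Below is a minimal sketch of that pattern on invented toy frames (the real ACDC dataframes are not reproduced here):

import pandas as pd

# toy stand-ins for one fold's train/test frames (not the real ACDC data)
df_train = pd.DataFrame({"fold": [0, 0], "modality": ["train", "train"], "patient": ["p1", "p2"]})
df_test = pd.DataFrame({"fold": [0], "modality": ["test"], "patient": ["p3"]})

df_folds = pd.DataFrame(columns=["fold", "modality", "patient"])
df_fold = pd.concat([df_train, df_test])               # stack train and test rows of one fold
df_folds = pd.concat([df_fold, df_folds], sort=True)   # append the fold to the accumulator
print(df_folds.reset_index(drop=True))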
import pandas as pd
dataset = pd.read_excel("C:\MY PROGRAM FILES\TT_DataMining_Youssou.xlsx", sheet_name= "EDU")
dataset.head()
dataset.columns
columns_to_drop = ['Dupli\nName', 'Full Address', 'State US/Can', 'GMaps', 'Company Type', 'Checked Fields', 'Website', 'Notes', 'Duplicate Domain', 'Checked Contacts', 'Parent company', 'Score', 'GMaps link', 'Country', 'State', 'Phone', 'Short Address', 'City', 'County', 'Owner', 'Zip', 'Latitude', 'Longitude','Charter School', 'Nces Id', 'School Level', 'Unnamed: 36']
dataset = dataset.drop(columns = columns_to_drop)
dataset
#Unpivoting data --> transposing columns into rows
#id_vars = list(dataset.columns)[0]
#dataset.melt(id_vars=id_vars)
contacts = pd.read_excel("C:\MY PROGRAM FILES\TT_DataMining_Youssou2.xlsx", sheet_name= "EDU_C")
#pd.merge(left= ,right= ,how= ,left_on= ,right_on=)
dataset_joint =
|
pd.merge(left=dataset, right=contacts, how="left", left_on="Domain", right_on="Domain")
|
pandas.merge
|
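Here the completion is a left join on the Domain column, so every company row is kept and unmatched domains get NaN in the contact columns. A self-contained sketch with made-up rows (the Excel files themselves are not available here):

import pandas as pd

dataset = pd.DataFrame({"Domain": ["a.edu", "b.edu"], "Company": ["Alpha U", "Beta College"]})
contacts = pd.DataFrame({"Domain": ["a.edu"], "Contact": ["Jane Doe"]})

# how="left" keeps all rows of `dataset`; b.edu has no contact and ends up with NaN
dataset_joint = pd.merge(left=dataset, right=contacts, how="left",
                         left_on="Domain", right_on="Domain")
print(dataset_joint)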
import sys
#sys.path.append("..")
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.signal.conv as C
from epimodel import EpidemiologicalParameters, preprocess_data
np.random.seed(123456)
import argparse
import copy
import datetime
import itertools
import pickle
import re
from datetime import timedelta
import matplotlib.dates as mdates
import pandas as pd
import pymc3 as pm
from sklearn import preprocessing
US = True
MOBI = "include" # "exclude"
RADER_JOINT = False
GATHERINGS = 3
# MASKING = True # Now always true
# smoothing happens at data init time if at all
assert MOBI in ["exclude", "only", "include"]
mob_cols = [
"avg_mobility_no_parks_no_residential",
"residential_percent_change_from_baseline",
]
# Ds = pd.date_range('2020-05-01', '2020-09-01', freq='D')
# Adding 20 days to account for death delay
Ds = pd.date_range("2020-05-01", "2020-09-21", freq="D")
def fill_missing_days(df):
df = df.set_index(["date", "country"])
df = df.unstack(fill_value=-1).asfreq("D", fill_value=-1).stack().reset_index()
df = df.replace(-1, np.nan)
return interpolate_wearing_fwd_bwd(df)
def interpolate_wearing_fwd_bwd(df):
regions = df.country.unique()
cs = []
for r in regions:
c = df[df.country == r]
c = c.set_index("date")
c = c.interpolate(method="time", limit_direction="both").reset_index()
cs.append(c)
return pd.concat(cs)
# moving average
def smooth(x, window_len=7):
l = window_len
s = np.r_[x[l - 1 : 0 : -1], x, x[-2 : -l - 1 : -1]]
w = np.ones(window_len, "d")
return np.convolve(w / w.sum(), s, mode="valid")
def smooth_rader(df, win=7):
for r in df.label.unique():
s = df[df.label == r]
s["percent_mc"] = smooth(s["percent_mc"], window_len=win)[: -win + 1]
df[df.label == r] = s
return df
def joint_shop_work(df, THRESHOLD=2):
return (df.likely_wear_mask_grocery_shopping <= THRESHOLD) & (
df.likely_wear_mask_workplace <= THRESHOLD
)
def mean_shop_work(df, THRESHOLD=2):
venues = ["likely_wear_mask_grocery_shopping", "likely_wear_mask_workplace"]
df["percent_mc"] = df[venues].mean(axis=1)
return df["percent_mc"] <= THRESHOLD
def load_and_clean_rader_raw(THRESHOLD=2, SMOOTH_RADER=True): # or less
DATA_IN = "data/raw/"
directory = DATA_IN + "rader/sm_cny_data_1_21_21.csv"
us = pd.read_csv(directory)
masks = [
"likely_wear_mask_exercising_outside",
"likely_wear_mask_grocery_shopping",
"likely_wear_mask_visit_family_friends",
"likely_wear_mask_workplace",
]
# weights = ["weight_daily_national_13plus", "weight_state_weekly"]
us = us[["response_date", "state"] + masks] # + weights
codes = pd.read_excel(DATA_IN + "rader/cny_sm_codebook_2_5_21.xls")
num2name = codes[codes["column"] == "state"][["value", "label"]]
us = pd.merge(us, num2name, left_on="state", right_on="value").drop(
["value", "state"], axis=1
)
us["response_date"] = pd.to_datetime(us["response_date"])
if RADER_JOINT:
us["percent_mc"] = joint_shop_work(us, THRESHOLD)
else:
us["percent_mc"] = mean_shop_work(us, THRESHOLD)
us = (
us[["response_date", "label", "percent_mc"]]
.groupby(["response_date", "label"])
.mean()
.reset_index()
)
if SMOOTH_RADER:
us = smooth_rader(us)
return us
def load_and_clean_rader(THRESHOLD=2, SMOOTH_RADER=True): # or less
DATA_IN = "data/raw/"
directory = DATA_IN + "rader/rader_us_wearing_aggregated_mean_shop_and_work.csv"
us = pd.read_csv(directory)
us["response_date"] = pd.to_datetime(us["response_date"])
return us
def add_dummy_wearing_us(us, backfill=True):
rader_start = us.date.iloc[0] - timedelta(days=1)
fill_days = pd.date_range(Ds[0], rader_start, freq="D")
Rs = us.country.unique()
if backfill:
for s in Rs:
df = pd.DataFrame(columns=["date", "country", "percent_mc"])
df.date = fill_days
df.country = s
fill = us.set_index(["country", "date"]).loc[s].percent_mc.iloc[0]
df.percent_mc = fill
us = pd.concat([df, us])
# totally random dummy
else:
for s in us.country.unique():
df = pd.DataFrame(columns=["date", "country", "percent_mc"])
df.date = fill_days
df.country = s
df.percent_mc = np.random.random(len(df))
us = pd.concat([df, us])
us = us.sort_values(["date", "country"])
return us
def load_and_clean_wearing():
wearing = pd.read_csv(
"data/raw/umd/umd_national_wearing.csv",
parse_dates=["survey_date"],
infer_datetime_format=True,
).drop_duplicates()
wearing = wearing[(wearing.survey_date >= Ds[0]) & (wearing.survey_date <= Ds[-1])]
cols = ["country", "survey_date", "percent_mc"]
wearing = wearing[cols]
cols = ["country", "date", "percent_mc"]
wearing.columns = cols
# Append US
us_wearing = load_and_clean_rader()
us_wearing.columns = ["date", "country", "percent_mc"]
us_wearing = us_wearing[cols]
us_wearing = us_wearing.replace("Georgia", "Georgia-US")
us_wearing = us_wearing.replace("District of Columbia (DC)", "District of Columbia")
# Add dummy wearing back to 1st May
us_wearing = add_dummy_wearing_us(us_wearing, backfill=True)
wearing = pd.concat([wearing, us_wearing])
return fill_missing_days(wearing)
def get_npi_names(df):
cs = [1, 2, 4, 6, 7]
npis = []
for i in cs:
npi = [c for c in df.columns if f"C{i}" in c]
npi = [c for c in npi if f"Flag" not in c][0]
npis += [npi]
npis += ["H6_Facial Coverings"]
return npis
def add_diffs(df, npis):
Rs = df.CountryName.unique()
df = df.set_index("CountryName")
for c in Rs:
df.loc[c, "H6_diff"] = df.loc[c]["H6_Facial Coverings"].diff()
for npi in npis:
i = npi[:2]
df.loc[c, f"{i}_diff"] = df.loc[c][npi].diff()
df.loc[c, f"{i}_flag_diff"] = df.loc[c][f"{i}_Flag"].diff()
return df.reset_index()
# Measure increases and flag 1 -> 0
def detect_regional_increase_national_off(df, npi):
return df[(df[f"{npi}_diff"] > 0) & (df[f"{npi}_flag_diff"] < 0)]
# Measure flag 0 -> 1
def detect_national_back_on(df, npi):
return df[df[f"{npi}_flag_diff"] > 0]
# & (df[f"{npi}_diff"] < 0) # Decrease only
def get_previous_national_value(start, df, country, npi):
previousValDate = start - timedelta(days=1)
c = df[df.CountryName == country]
previousValDf = c[c.date == previousValDate][npi]
l = list(previousValDf)
val = l[0]
return val
def impute_country_switch(row, df, npi, country_ends):
country = row["CountryName"]
start = row["date"]
code = npi[:2]
# Want the val of the day before the regional change
if start == Ds[0]:
return
previousVal = get_previous_national_value(start, df, country, npi)
isChangeImputed = False
# Only impute once per regional change:
while not isChangeImputed:
for _, end in country_ends.iterrows():
if end["date"] <= start:
continue
# `between` is inclusive so trim last day:
end = end["date"] - timedelta(days=1)
df.loc[
(df.CountryName == country) & (df.date.between(start, end)), npi
] = previousVal
df.loc[
(df.CountryName == country) & (df.date.between(start, end)),
code + "_Flag",
] = 1
isChangeImputed = True
break
# if OXCGRT never returns to national flag,
# impute to end of our window
if isChangeImputed == False:
end = Ds[-1]
df.loc[
(df.CountryName == country) & (df.date.between(start, end)), npi
] = previousVal
df.loc[
(df.CountryName == country) & (df.date.between(start, end)), code + "_Flag"
] = 1
return df
# Find regional NPI increases which obscure national NPIs
# Find dates of dataset returning to national policy
# Fill regional increases with previous level and the flag=1
# Up to next national measurement date
def find_and_impute_npi(df, npi, diffed):
code = npi[:2]
diff_col = code + "_diff"
flag_diff = code + "_flag_diff"
imputation_starts = detect_regional_increase_national_off(diffed, code)
imputation_ends = detect_national_back_on(diffed, code)
for i, row in imputation_starts.iterrows():
country = row["CountryName"]
country_ends = imputation_ends[imputation_ends.CountryName == country]
df = impute_country_switch(row, df, npi, country_ends)
return df
def fix_regional_overwrite(oxcgrt, npis):
npis = get_npi_names(oxcgrt)
diffed = add_diffs(oxcgrt, npis)
for npi in npis:
oxcgrt = find_and_impute_npi(oxcgrt, npi, diffed)
check_imputation(oxcgrt)
return oxcgrt
def load_oxcgrt(use_us=True):
OXCGRT_PATH = "data/raw/OxCGRT_latest.csv"
oxcgrt = pd.read_csv(OXCGRT_PATH, parse_dates=["Date"], low_memory=False)
# Drop regional data
nat = oxcgrt[oxcgrt.Jurisdiction == "NAT_TOTAL"]
# Add US states
if use_us:
states = oxcgrt[
(oxcgrt.CountryName == "United States")
& (oxcgrt.Jurisdiction == "STATE_TOTAL")
]
# Drop GEO to prevent name collision
nat = nat[nat.CountryName != "Georgia"]
states.CountryName = states.RegionName
states = states.replace("Georgia", "Georgia-US")
nat =
|
pd.concat([nat, states])
|
pandas.concat
|
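Beyond the final pandas.concat, the wearing-data row above depends on interpolate_wearing_fwd_bwd, which fills missing survey days per country with time-weighted interpolation in both directions. A toy sketch of that step for a single invented country series:

import numpy as np
import pandas as pd

c = pd.DataFrame({
    "date": pd.date_range("2020-05-01", periods=5, freq="D"),
    "percent_mc": [np.nan, 0.60, np.nan, 0.80, np.nan],
})
c = c.set_index("date")
# method="time" weights by the gap between dates; limit_direction="both"
# also fills the leading and trailing gaps with the nearest valid values
c = c.interpolate(method="time", limit_direction="both").reset_index()
print(c)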
import glob
import json
import logging
import matplotlib.patheffects as path_effects
import numpy as np
import os
import pandas as pd
import re
import matplotlib as mpl
# TODO: Reset this
# mpl.use('Agg')
import shutil
from os.path import basename
from matplotlib import pyplot as plt
from shutil import copyfile
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# DPI default used on plotting
DPI = 150
MAX_PATHS_ON_EPISODE_PLOT = 100
INPUT_PATH = 'output'
OUTPUT_PATH = os.path.join('output', 'images')
REPORT_PATH = os.path.join('output', 'report')
# This should be moved somewhere central. Shouldn't this be in run_experiment, or shared with run_experiment?
TO_PROCESS = {
'PI': {
'path': 'PI',
'file_regex': re.compile('(.*)_grid\.csv')
},
'VI': {
'path': 'VI',
'file_regex': re.compile('(.*)_grid\.csv')
},
'QL': {
'path': 'QL',
'file_regex': re.compile('(.*)_grid\.csv')
}
}
the_best = {}
WATERMARK = False
GATECH_USERNAME = 'DO NOT STEAL'
TERM = 'Spring 2019'
def watermark(p):
if not WATERMARK:
return p
ax = plt.gca()
for i in range(1, 11):
p.text(0.95, 0.95 - (i * (1.0/10)), '{} {}'.format(GATECH_USERNAME, TERM), transform=ax.transAxes,
fontsize=32, color='gray',
ha='right', va='bottom', alpha=0.2)
return p
def plot_episode_stats(title_base, stats, smoothing_window=50):
# Trim the DF down based on the episode lengths
stats = stats[stats['length'] > 0]
# Plot the episode length over time, both as a line and histogram
fig1 = plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.grid()
plt.tight_layout()
plt.plot(stats['length'])
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
plt.subplot(122)
plt.hist(stats['length'], zorder=3)
plt.grid(zorder=0)
plt.xlabel("Episode Length")
plt.ylabel("Count")
plt.title(title_base.format("Episode Length (Histogram)"))
fig1 = watermark(fig1)
plt.tight_layout()
# Plot the episode reward over time
fig2 = plt.figure(figsize=(10, 5))
rewards_smoothed = pd.Series(stats['reward']).rolling(
smoothing_window, min_periods=smoothing_window
).mean()
plt.subplot(121)
plt.grid()
plt.tight_layout()
plt.plot(rewards_smoothed)
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
plt.title("Episode Reward over Time ({})".format(smoothing_window))
plt.subplot(122)
plt.hist(stats['reward'], zorder=3)
plt.grid(zorder=0)
plt.xlabel("Episode Reward")
plt.ylabel("Count")
plt.title(title_base.format("Episode Reward (Histogram)"))
fig2 = watermark(fig2)
plt.tight_layout()
# Plot time steps and episode number
fig3 = plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.grid()
plt.tight_layout()
time_steps = np.cumsum(stats['time'])
plt.plot(time_steps, np.arange(len(stats['time'])))
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
plt.subplot(122)
plt.hist(time_steps, zorder=3)
plt.grid(zorder=0)
plt.xlabel("Time Step")
plt.ylabel("Count")
plt.title(title_base.format("Episode Time (Histogram)"))
fig3 = watermark(fig3)
plt.tight_layout()
return fig1, fig2, fig3
def plot_policy_map(title, policy, map_desc, color_map, direction_map, map_mask=None):
"""
:param title:
:param policy:
:param map_desc:
:param color_map:
:param direction_map:
:param map_mask: (OPTIONAL) Defines a mask in the same shape of policy that indicates which tiles should be printed.
Only elements that are True will have policy printed on the tile
:return:
"""
if map_mask is None:
map_mask = np.ones(policy.shape, dtype=bool)
fig = plt.figure()
# TODO: Does this xlim/ylim even do anything?
ax = fig.add_subplot(111)
# FEATURE: Handle this better
font_size = 'xx-small'
# font_size = 'x-large'
# if policy.shape[1] > 16 or len(direction_map[0]) > 2:
# font_size = 'small'
plt.title(title)
for i in range(policy.shape[0]):
for j in range(policy.shape[1]):
y = policy.shape[0] - i - 1
x = j
p = plt.Rectangle((x, y), 1, 1, edgecolor='k', linewidth=0.1)
p.set_facecolor(color_map[map_desc[i, j]])
ax.add_patch(p)
if map_mask[i, j]:
text = ax.text(x+0.5, y+0.5, str(direction_map[policy[i, j]]), weight='bold', size=font_size,
horizontalalignment='center', verticalalignment='center', color='k')
# TODO: Remove this?
# text.set_path_effects([path_effects.Stroke(linewidth=1, foreground='black'),
# path_effects.Normal()])
plt.axis('off')
plt.xlim((0, policy.shape[1]))
plt.ylim((0, policy.shape[0]))
plt.tight_layout()
return watermark(plt)
def plot_value_map(title, v, map_desc, color_map, map_mask=None):
"""
:param title:
:param v:
:param map_desc:
:param color_map:
:param map_mask: (OPTIONAL) Defines a mask in the same shape of policy that indicates which tiles should be printed.
Only elements that are True will have policy printed on the tile
:return:
"""
if map_mask is None:
map_mask = np.ones(v.shape, dtype=bool)
fig = plt.figure()
ax = fig.add_subplot(111)
# FEATURE: Fix this better
font_size = 'xx-small'
# font_size = 'x-large'
# if v.shape[1] > 16:
# font_size = 'small'
v_min = np.min(v)
v_max = np.max(v)
# TODO: Disable this in more reasonble way. Use input arg?
# bins = np.linspace(v_min, v_max, 100)
# v_red = np.digitize(v, bins)/100.0
# # Flip so that numbers are red when low, not high
# v_red = np.abs(v_red - 1)
for i in range(v.shape[0]):
for j in range(v.shape[1]):
value = np.round(v[i, j], 1)
if len(str(value)) > 3:
font_size = 'xx-small'
plt.title(title)
for i in range(v.shape[0]):
for j in range(v.shape[1]):
y = v.shape[0] - i - 1
x = j
p = plt.Rectangle([x, y], 1, 1, edgecolor='k', linewidth=0.1)
p.set_facecolor(color_map[map_desc[i, j]])
ax.add_patch(p)
value = np.round(v[i, j], 1)
# red = v_red[i, j]
# if map_desc[i, j] in b'HG':
# continue
if map_mask[i, j]:
text2 = ax.text(x+0.5, y+0.5, value, size=font_size, weight='bold',
horizontalalignment='center', verticalalignment='center', color='k')
# text2 = ax.text(x+0.5, y+0.5, value, size=font_size,
# horizontalalignment='center', verticalalignment='center', color=(1.0, 1.0-red, 1.0-red))
# text2.set_path_effects([path_effects.Stroke(linewidth=1, foreground='black'),
# path_effects.Normal()])
plt.axis('off')
plt.xlim((0, v.shape[1]))
plt.ylim((0, v.shape[0]))
plt.tight_layout()
return watermark(plt)
def plot_episodes(title, episodes, map_desc, color_map, direction_map, max_episodes=MAX_PATHS_ON_EPISODE_PLOT,
path_alpha=None, fig=None):
"""
Draw the paths of multiple episodes on a map.
:param title:
:param episodes:
:param map_desc:
:param color_map:
:param direction_map:
:param max_episodes: Maximum number of epsides plotted. If len(episodes)>max_episodes, max_episodes randomly chosen
episodes will be plotted
:param path_alpha:
:param fig:
:return:
"""
i_episodes = np.arange(len(episodes))
if len(episodes) > max_episodes:
i_episodes = np.random.choice(i_episodes, size=max_episodes, replace=False)
if path_alpha is None:
path_alpha = max(1.0 / len(i_episodes), 0.02)
if fig is None:
fig, ax = plt.subplots()
else:
ax = fig.get_axes()[-1]
# Plot the background map first
plot_map(map_desc, color_map, fig=fig)
for i_episode in i_episodes:
episode = episodes[i_episode]
fig = plot_episode(title=title, episode=episode, map_desc=map_desc, color_map=color_map,
direction_map=direction_map, annotate_actions=False, annotate_velocity=False,
path_alpha=path_alpha, fig=fig, plot_the_map=False)
# TODO: WATERMARK
return fig
def plot_map(map_desc, color_map, fig=None):
if fig is None:
fig, ax = plt.subplots()
else:
ax = fig.get_axes()[0]
for i in range(map_desc.shape[0]):
for j in range(map_desc.shape[1]):
y = map_desc.shape[0] - i - 1
x = j
p = plt.Rectangle((x, y), 1, 1, edgecolor='k', linewidth=0.1)
p.set_facecolor(color_map[map_desc[i, j]])
ax.add_patch(p)
ax.axis('off')
ax.set_xlim(0, map_desc.shape[1])
ax.set_ylim(0, map_desc.shape[0])
fig.tight_layout()
return fig
def plot_episode(title, episode, map_desc, color_map, direction_map, annotate_actions=True, annotate_velocity=True,
path_alpha=1.0, path_color='r', plot_the_map=True, fig=None, annotation_fontsize=6,
annotation_color='r', annotation_offset=-0.5):
"""
Plot an episode on the map, showing the path the agent has travelled during this episode.
:param title:
:param episode: List of (s,a,r,s') tuples describing an episode
:param map_desc:
:param color_map:
:param direction_map:
:param annotate_actions: If True, annotate all states with the action taken from them
:param annotate_velocity: If True, plot will be annotated with velocities for each transition (note this only works
for an environment that has state of (x, y, vx, vy))
:param path_color: Color used for the trace of agent's path
:param path_alpha: Float alpha parameter passed to matplotlib when plotting the route. If plotting multiple routes
to same figure, use alpha<1.0 to improve clarity (more frequently travelled routes will be
darker).
:param plot_the_map: If True, plot the background map as matplotlib rectangles (if calling this function to plot
episodes, set this to False in all but one call to reduce plotting time)
:param fig: Optional matplotlib figure object for plotting on an existing figure (will be created if omitted)
:param annotation_fontsize: Fontsize for all annotations
:param annotation_color: Color for all annotations
:param annotation_offset: Offset in x and y for all annotations (note this generally only works well as a negative)
:return: Matplotlib figure object used
"""
if fig is None:
fig, ax = plt.subplots()
else:
ax = fig.get_axes()[0]
fig.suptitle(title)
if plot_the_map:
plot_map(map_desc, color_map, fig=fig)
# print(f'map_desc.shape = {map_desc.shape}')
for transition in episode:
if isinstance(transition[0], int):
# Integer states. Need to infer location by wrapping into a grid of same shape as the map
i, j = np.unravel_index(transition[0], map_desc.shape)
x = j + 0.5
y = map_desc.shape[0] - i - 1 + 0.5
i_end, j_end = np.unravel_index(transition[3], map_desc.shape)
x_end = j_end + 0.5
y_end = map_desc.shape[0] - i_end - 1 + 0.5
else:
# Transition traces go from s to s', but shift by +0.5, +0.5 because the map is plotted by the bottom left
# corner coordinate
x = transition[0][0] + 0.5
y = transition[0][1] + 0.5
x_end = transition[3][0] + 0.5
y_end = transition[3][1] + 0.5
# print(f'x,y = {x,y}')
# Plot the path
ax.plot((x, x_end), (y, y_end), '-o', color=path_color, alpha=path_alpha)
if annotate_velocity:
# Velocity is next velocity, so pull from the s-prime
v_xy = transition[3][2:]
# Annotate with the velocity of a move
arrow_xy = ((x + x_end) / 2, (y + y_end) / 2)
annotation_xy = (arrow_xy[0] + annotation_offset, arrow_xy[1] + annotation_offset)
ax.annotate(f'v={str(v_xy)}',
xy=arrow_xy,
xytext=annotation_xy,
color=annotation_color,
arrowprops={'arrowstyle': '->', 'color': annotation_color},
horizontalalignment='right',
verticalalignment='bottom',
)
if annotate_actions:
action = transition[1]
arrow_xy = (x, y)
annotation_xy = (arrow_xy[0] + annotation_offset, arrow_xy[1] + annotation_offset)
ax.annotate(f'a={str(action)}',
xy=arrow_xy,
xytext=annotation_xy,
color=annotation_color,
arrowprops={'arrowstyle': '->', 'color': annotation_color},
horizontalalignment='right',
verticalalignment='bottom',
)
ax.axis('off')
ax.set_xlim(0, map_desc.shape[1])
ax.set_ylim(0, map_desc.shape[0])
fig.tight_layout()
# TODO: WATERMARK
return fig
def plot_time_vs_steps(title, df, xlabel="Steps", ylabel="Time (s)"):
plt.close()
plt.figure()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid()
plt.plot(df.index.values, df['time'], '-', linewidth=1)
plt.legend(loc="best")
plt.tight_layout()
return watermark(plt)
def plot_reward_and_delta_vs_steps(title, df, xlabel="Steps", ylabel="Reward"):
plt.close()
plt.figure()
f, (ax) = plt.subplots(1, 1)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
lns1 = ax.plot(df.index.values, df['reward'], color='green', linewidth=1, label=ylabel)
ex_ax = ax.twinx()
lns2 = ex_ax.plot(df.index.values, df['delta'], color='blue', linewidth=1, label='Delta')
ex_ax.set_ylabel('Delta')
ex_ax.tick_params('y')
ax.grid()
ax.axis('tight')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=0)
f.tight_layout()
return watermark(plt)
# Adapted from http://code.activestate.com/recipes/578293-unicode-command-line-histograms/
def cli_hist(data, bins=10):
bars = u' ▁▂▃▄▅▆▇█'
n, bin_edges = np.histogram(data, bins=bins)
n2 = map(int, np.floor(n*(len(bars)-1)/(max(n))))
res = u' '.join(bars[i] for i in n2)
return res
# Adapted from https://gist.github.com/joezuntz/2f3bdc2ab0ea59229907
def ascii_hist(data, bins=10):
N, X = np.histogram(data, bins=bins)
total = 1.0 * len(data)
width = 50
nmax = N.max()
lines = []
for (xi, n) in zip(X, N):
bar = '#' * int(n * 1.0 * width / nmax)
xi = '{0: <8.4g}'.format(xi).ljust(10)
lines.append('{0}| {1}'.format(xi, bar))
return lines
def fetch_mdp_name(file, regexp):
search_result = regexp.search(basename(file))
if search_result is None:
return False, False
mdp_name = search_result.groups()[0]
return mdp_name, ' '.join(map(lambda x: x.capitalize(), mdp_name.split('_')))
# def process_params(problem_name, params):
# param_str = '{}'.format(params['discount_factor'])
# if problem_name == 'QL':
# param_str = '{}_{}_{}_{}_{}'.format(params['alpha'], params['q_init'], params['epsilon'],
# params['epsilon_decay'], params['discount_factor'])
#
# return param_str
def find_optimal_params(problem_name, base_dir, file_regex):
# FEATURE: Add something to catch failures here when leftover files from other runs are present.
grid_files = glob.glob(os.path.join(base_dir, '*_grid*.csv'))
logger.info("Grid files {}".format(grid_files))
best_params = {}
for f in grid_files:
mdp, readable_mdp = fetch_mdp_name(f, file_regex)
logger.info("MDP: {}, Readable MDP: {}".format(mdp, readable_mdp))
df = pd.read_csv(f)
# Why copy this?
best = df.copy()
# Attempt to find the best params. First look at the reward mean, then median, then max. If at any point we
# have more than one result as "best", try the next criterion
for criterion in ['reward_mean', 'reward_median', 'reward_max']:
best_value = np.max(best[criterion])
best = best[best[criterion] == best_value]
if best.shape[0] == 1:
break
# If we have more than one best, take the highest index.
if best.shape[0] > 1:
best = best.iloc[-1:]
params = best.iloc[-1]['params']
params = json.loads(params)
best_index = best.iloc[-1].name
best_params[mdp] = {
'name': mdp,
'readable_name': readable_mdp,
'index': best_index,
'params': params,
'param_str': params_to_filename_base(**params)
}
return best_params
def find_policy_images(base_dir, params):
# FEATURE: This image grabber does not handle cases when velocity or other extra state variables are present. Fix
policy_images = {}
for mdp in params:
mdp_params = params[mdp]
fileStart = os.path.join(base_dir, '{}{}'.format(mdp_params['name'], mdp_params['param_str']))
image_files = glob.glob(fileStart + '*.png')
if len(image_files) == 2:
policy_file = None
value_file = None
for image_file in image_files:
if 'Value' in image_file:
value_file = image_file
else:
policy_file = image_file
logger.info("Value file {}, Policy File: {}".format(value_file, policy_file))
policy_images[mdp] = {
'value': value_file,
'policy': policy_file
}
elif len(image_files) < 2:
logger.error("Unable to find image file for {} with params {}".format(mdp, mdp_params))
else:
logger.warning("Found {} image files for {} with params {} ".format(len(image_files), mdp, mdp_params) + \
"- too many files, nothing copied")
return policy_images
def find_data_files(base_dir, params):
data_files = {}
for mdp in params:
mdp_params = params[mdp]
print('find data files: ' + '{}{}.csv'.format(mdp_params['name'], mdp_params['param_str']))
files = glob.glob(os.path.join(base_dir, '{}{}.csv'.format(mdp_params['name'], mdp_params['param_str'])))
optimal_files = glob.glob(os.path.join(base_dir, '{}{}_optimal.csv'.format(mdp_params['name'], mdp_params['param_str'])))
episode_files = glob.glob(os.path.join(base_dir, '{}{}_episode.csv'.format(mdp_params['name'], mdp_params['param_str'])))
logger.info("files {}".format(files))
logger.info("optimal_files {}".format(optimal_files))
logger.info("episode_files {}".format(episode_files))
data_files[mdp] = {
'file': files[0],
'optimal_file': optimal_files[0]
}
if len(episode_files) > 0:
data_files[mdp]['episode_file'] = episode_files[0]
return data_files
def copy_best_images(best_images, base_dir):
for problem_name in best_images:
for mdp in best_images[problem_name]:
mdp_files = best_images[problem_name][mdp]
dest_dir = os.path.join(base_dir, problem_name)
policy_image = mdp_files['policy']
value_image = mdp_files['value']
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
policy_dest = os.path.join(dest_dir, basename(policy_image))
value_dest = os.path.join(dest_dir, basename(value_image))
logger.info("Copying {} to {}".format(policy_image, policy_dest))
logger.info("Copying {} to {}".format(value_image, value_dest))
copyfile(policy_image, policy_dest)
copyfile(value_image, value_dest)
def copy_data_files(data_files, base_dir):
for problem_name in data_files:
for mdp in data_files[problem_name]:
mdp_files = data_files[problem_name][mdp]
dest_dir = os.path.join(base_dir, problem_name)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for file_type in mdp_files:
file_name = mdp_files[file_type]
file_dest = os.path.join(dest_dir, basename(file_name))
logger.info("Copying {} file from {} to {}".format(file_type, file_name, file_dest))
copyfile(file_name, file_dest)
def plot_data(data_files, envs, base_dir):
for problem_name in data_files:
for mdp in data_files[problem_name]:
env = lookup_env_from_mdp(envs, mdp)
if env is None:
logger.error("Unable to find env for MDP {}".format(mdp))
return
mdp_files = data_files[problem_name][mdp]
step_term = 'Steps'
if problem_name == 'QL':
step_term = 'Episodes'
df =
|
pd.read_csv(mdp_files['file'])
|
pandas.read_csv
|
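The reward curve in plot_episode_stats above is smoothed with a rolling mean before plotting; because min_periods equals the window, the first window-1 points stay NaN. A compact sketch on synthetic rewards:

import numpy as np
import pandas as pd

rewards = pd.Series(np.random.RandomState(0).randn(50).cumsum())
window = 5
rewards_smoothed = rewards.rolling(window, min_periods=window).mean()
print(rewards_smoothed.head(7))  # first 4 values are NaN, then the running means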
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data('asof.csv')
|
assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
#imports
import numpy
import pandas
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, cohen_kappa_score, classification_report
from sklearn.model_selection import cross_val_score
from sklearn.cluster import KMeans
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import matplotlib.pyplot as plt
#reading the csv
rental_df = pandas.read_json('train.json')
#analysis
print(rental_df.head(10))
print(rental_df.dtypes)
print(rental_df.apply(lambda x: x.isnull().any()))
print(rental_df.describe())
#boxplot usage
bathroom_data = rental_df['bathrooms']
bedroom_data = rental_df['bedrooms']
price_data = rental_df['price']
# plt.boxplot(price_data)
# plt.show()
#data preparation
# outliers
bathroom_upper_limit = 8
bathroom_lower_limit = 1
bedroom_upper_limit = 5.5
price_upper_limit = 50000
bathroom_data.loc[rental_df['bathrooms']>bathroom_upper_limit] = bathroom_upper_limit
bedroom_data.loc[rental_df['bedrooms']>bedroom_upper_limit] = bedroom_upper_limit
price_data.loc[rental_df['price']>price_upper_limit] = price_upper_limit
bathroom_data.loc[rental_df['bathrooms']<bathroom_lower_limit] = bathroom_lower_limit
# plt.boxplot(price_data)
# plt.show()
# # processing the data
processed_df = rental_df.drop(['features','photos','description','display_address','street_address'],axis=1)
processed_df['building_id'] = rental_df.building_id.astype('category')
processed_df['created'] = rental_df.created.astype('category')
processed_df['listing_id'] = rental_df.listing_id.astype('category')
processed_df['manager_id'] = rental_df.manager_id.astype('category')
processed_df['building_id'] = pandas.get_dummies(processed_df['building_id'])
processed_df['created'] =
|
pandas.get_dummies(processed_df['created'])
|
pandas.get_dummies
|
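The target API here is pandas.get_dummies, which expands a categorical column into 0/1 indicator columns. A standalone sketch; note that the indicator frame is usually concatenated back onto the data rather than assigned to a single column as in the snippet above:

import pandas as pd

df = pd.DataFrame({"interest_level": ["low", "medium", "high", "low"]})
dummies = pd.get_dummies(df["interest_level"], prefix="interest")
df = pd.concat([df, dummies], axis=1)  # keep the original column plus the indicators
print(df)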
"""
Plot the evaluation results.
"""
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
datasets = ['MLB', 'SportSett', 'SumTime', 'Obituary']
def save_df_dist_plot(df):
"""
Plot the evaluation results dataframe as a grouped bar chart, annotate each bar with its value,
and save the figure and the underlying CSV for every dataset.
"""
plt.rcParams.update({'font.size': 12})
ax = df.transpose().plot.bar(figsize=(9, 5), rot=0)#, marker='*', markersize=10, linewidth=3)
for p in ax.patches:
ax.annotate(f'{str(int(p.get_height()))}', (p.get_x() * 1.005, (p.get_height() * 1.005) + 2))
ax.set_title(f'Evaluation Results')
ax.set_ylim(0, 110)
# ax.set_ylabel('Percentage')
# ax.set_xticklabels(datasets)
plt.rcParams.update({'font.size': 12})
ax.figure.tight_layout()
for dataset in datasets:
ax.figure.savefig(f'./{dataset.lower()}/output/plots/ct_dist/eval.png', dpi=300)
df.to_csv(f'./{dataset.lower()}/output/csvs/ct_dist/eval.csv')
def main():
d = {
'Accuracy': [],
'BLEU': [],
'METEOR': [],
'chrF++': [],
'BERT-SCORE F1': [],
'ROUGE-L F1': [],
}
decimal_keys = ['METEOR', 'chrF++', 'ROUGE-L F1']
real_keys = ['BLEU', 'BERT-SCORE F1']
datasets = ['MLB', 'SportSett', 'SumTime', 'Obituary']
for dataset in datasets:
# auto_file_name = "neural_t5" if dataset == "MLB" or dataset == 'SportSett' else "neural"
auto_file_name = "neural"
auto = json.load(open(f"{dataset.lower()}/eval/jsons/{auto_file_name}.json"))
for k, v in auto.items():
if k in real_keys:
# print(k, d)
d[k].append(v)
elif k in decimal_keys:
d[k].append(v * 100)
# acc_file_name = "neural_t5" if dataset == 'SportSett' else "neural" #dataset == "MLB" or
acc_file_name = "neural"
accs = json.load(open(f"./{dataset.lower()}/eval/jsons/{acc_file_name}_acc_err.json"))
total_acc = 0
for k, v in accs.items():
total_acc += v
d['Accuracy'].append(total_acc)
df =
|
pd.DataFrame(d, index=datasets)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import IntervalRestrictor
from recipe_config_loading import get_interval_restriction_params
@pytest.fixture
def datetime_column():
return "Date"
@pytest.fixture
def df(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2]
country = ["first", "first", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df(datetime_column):
co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100]
country = ["first", "first", "first", "first", "second", "second", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df_2(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10]
country = ["first", "first", "second", "second", "third", "third"]
country_2 = ["first", "first", "second", "second", "third", "third"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, "item": country_2, datetime_column: time_index})
return df
@pytest.fixture
def long_df_3(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300]
country = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_2 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_3 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M")).append(pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, "item": country_2, "store": country_3, datetime_column: time_index})
return df
@pytest.fixture
def long_df_4(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300]
country = ["first", "first", "second", "second", "third", "third", "first", "first"]
country_2 = ["first", "first", "second", "second", "third", "third", "second", "first"]
country_3 = ["first", "first", "second", "second", "third", "third", "third", "fourth"]
time_index = pd.date_range("1-1-2020", periods=2, freq="M").append(pd.date_range("1-1-2020", periods=2, freq="M")).append(
pd.date_range("1-1-2020", periods=2, freq="M")).append(
|
pd.date_range("1-1-2020", periods=2, freq="M")
|
pandas.date_range
|
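The fixtures above build their datetime index by appending several pd.date_range calls, producing repeated month-end timestamps for the grouped test frames. In isolation:

import pandas as pd

idx = pd.date_range("1-1-2020", periods=2, freq="M").append(
    pd.date_range("1-1-2020", periods=2, freq="M"))
print(list(idx))  # [2020-01-31, 2020-02-29, 2020-01-31, 2020-02-29]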
import pandas as pd
import matplotlib.pyplot as plt
import requests
import numpy as np
from math import floor
from termcolor import colored as cl
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (20, 10)
# EXTRACTING STOCK DATA
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_rsi(close, lookback):
ret = close.diff()
up = []
down = []
for i in range(len(ret)):
if ret[i] < 0:
up.append(0)
down.append(ret[i])
else:
up.append(ret[i])
down.append(0)
up_series = pd.Series(up)
down_series = pd.Series(down).abs()
up_ewm = up_series.ewm(com = lookback - 1, adjust = False).mean()
down_ewm = down_series.ewm(com = lookback - 1, adjust = False).mean()
rs = up_ewm/down_ewm
rsi = 100 - (100 / (1 + rs))
rsi_df =
|
pd.DataFrame(rsi)
|
pandas.DataFrame
|
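get_rsi above smooths the up and down moves with Series.ewm(com=lookback-1, adjust=False).mean() before forming the relative-strength ratio. A condensed, vectorized sketch of that core step (clip replaces the explicit loop) on made-up closing prices:

import pandas as pd

close = pd.Series([100.0, 101.5, 101.0, 102.3, 103.0, 102.1])
lookback = 3
ret = close.diff()
up = ret.clip(lower=0)        # positive moves, 0 elsewhere
down = (-ret).clip(lower=0)   # magnitude of negative moves, 0 elsewhere
rs = up.ewm(com=lookback - 1, adjust=False).mean() / down.ewm(com=lookback - 1, adjust=False).mean()
rsi = 100 - 100 / (1 + rs)
print(rsi.round(2))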
from textwrap import dedent
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from .. import params
@pytest.fixture
def initial_params_dict():
return dict(alpha=1.0, beta=2.0)
@pytest.fixture
def initial_params_series():
return pd.Series([1.0, 2.0], ["alpha", "beta"])
@pytest.fixture
def basic_paramset():
p = pd.Series([1.0, 2.0], ["alpha", "beta"])
return params.ParamSet(p)
@pytest.fixture
def fixed_paramset():
p = pd.Series([1.0, 2.0], ["alpha", "beta"])
return params.ParamSet(p, ["beta"])
def test_paramset_series_initialization(initial_params_series):
p = params.ParamSet(initial_params_series)
pdt.assert_series_equal(p.params, initial_params_series)
pdt.assert_series_equal(p.free, initial_params_series)
pdt.assert_series_equal(p.fixed, pd.Series([]))
def test_paramset_dict_initialization(initial_params_dict):
p = params.ParamSet(initial_params_dict)
params_series = pd.Series(initial_params_dict)
pdt.assert_series_equal(p.params, params_series)
pdt.assert_series_equal(p.free, params_series)
pdt.assert_series_equal(p.fixed, pd.Series([]))
def test_paramset_fixed_initialization(initial_params_series):
free_names = ["alpha"]
fixed_names = ["beta"]
p = params.ParamSet(initial_params_series, fixed_names)
pdt.assert_series_equal(p.params, initial_params_series)
pdt.assert_series_equal(p.free, initial_params_series[free_names])
pdt.assert_series_equal(p.fixed, initial_params_series[fixed_names])
assert p.free_names == free_names
assert p.fixed_names == fixed_names
with pytest.raises(ValueError):
p = params.ParamSet(initial_params_series, ["gamma"])
def test_paramset_repr(basic_paramset, fixed_paramset):
expected_repr_without_fixed = dedent("""\
Free Parameters:
alpha: 1
beta: 2
""")
assert basic_paramset.__repr__() == expected_repr_without_fixed
expected_repr_with_fixed = dedent("""\
Free Parameters:
alpha: 1
Fixed Parameters:
beta: 2
""")
assert fixed_paramset.__repr__() == expected_repr_with_fixed
def test_paramset_paramset_update(basic_paramset):
update_series = pd.Series([3.0, 4.0], ["alpha", "beta"])
update_paramset = params.ParamSet(update_series)
new_paramset = basic_paramset.update(update_paramset)
pdt.assert_series_equal(basic_paramset.params, new_paramset.params)
pdt.assert_series_equal(basic_paramset.params, update_series)
def test_paramset_series_update(basic_paramset):
update = pd.Series([3.0, 4.0], ["alpha", "beta"])
new_paramset = basic_paramset.update(update)
|
pdt.assert_series_equal(basic_paramset.params, new_paramset.params)
|
pandas.util.testing.assert_series_equal
|
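The fixture tests above rely on pandas.util.testing.assert_series_equal; in current pandas the same helper is exposed as pandas.testing.assert_series_equal. A tiny illustration of a passing and a failing comparison:

import pandas as pd
import pandas.testing as pdt  # modern location of the assert helpers

a = pd.Series([1.0, 2.0], index=["alpha", "beta"])
b = pd.Series([1.0, 2.0], index=["alpha", "beta"])
pdt.assert_series_equal(a, b)  # passes silently

try:
    pdt.assert_series_equal(a, pd.Series([1.0, 3.0], index=["alpha", "beta"]))
except AssertionError:
    print("series differ, as expected")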
import functools
import warnings
warnings.filterwarnings('ignore')
import pickle
import numpy as np
import pandas as pd
import json
from textblob import TextBlob
import ast
import nltk
nltk.download('punkt')
from scipy import spatial
import torch
import spacy
import PyPDF2 as PyPDF2
import tabula as tabula
import tika
tika.initVM()
from tika import parser
from models import InferSent
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from flaskr.db import get_db
bp = Blueprint('book', __name__, url_prefix='/book')
@bp.route('/initialpage', methods=('GET', 'POST'))
def initialpage():
if request.method == 'POST':
book_file = request.form['book']
parsed = parser.from_file(book_file)
book = parsed["content"]
question = request.form['question']
db = get_db()
error = None
if not book:
error = 'Book is required.'
elif not question:
error = 'Question is required.'
if error is None:
if db.execute('SELECT book, question FROM bq').fetchone() is None:
db.execute('INSERT INTO bq (book, question) VALUES (?, ?)',(book, question))
db.commit()
bq = db.execute('SELECT * FROM bq WHERE (book, question) = (?, ?)',(book, question)).fetchone()
session.clear()
session['bq_id'] = bq['id']
return redirect(url_for('book.finalpage'))
flash(error)
return render_template('book/initialpage.html')
@bp.route('/finalpage')
def finalpage():
bq_id = session.get('bq_id')
if bq_id is None:
g.bq = None
else:
g.bq = get_db().execute('SELECT * FROM bq WHERE id = ?', (bq_id,)).fetchone()
context = g.bq['book']
questions = []
contexts = []
questions.append(g.bq['question'])
contexts.append(g.bq['book'])
df = pd.DataFrame({"context":contexts, "question": questions})
df.to_csv("flaskr/data/train.csv", index = None)
blob = TextBlob(context)
sentences = [item.raw for item in blob.sentences]
from models import InferSent
V = 1
MODEL_PATH = 'flaskr/encoder/infersent%s.pkl' % V
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
infersent = InferSent(params_model)
infersent.load_state_dict(torch.load(MODEL_PATH))
W2V_PATH = 'flaskr/GloVe/glove.840B.300d.txt'
infersent.set_w2v_path(W2V_PATH)
infersent.build_vocab(sentences, tokenize=True)
dict_embeddings = {}
for i in range(len(sentences)):
dict_embeddings[sentences[i]] = infersent.encode([sentences[i]], tokenize=True)
for i in range(len(questions)):
dict_embeddings[questions[i]] = infersent.encode([questions[i]], tokenize=True)
d1 = {key:dict_embeddings[key] for i, key in enumerate(dict_embeddings) if i % 2 == 0}
d2 = {key:dict_embeddings[key] for i, key in enumerate(dict_embeddings) if i % 2 == 1}
with open('flaskr/data/dict_embeddings1.pickle', 'wb') as handle:
pickle.dump(d1, handle)
with open('flaskr/data/dict_embeddings2.pickle', 'wb') as handle:
pickle.dump(d2, handle)
del dict_embeddings
train =
|
pd.read_csv("flaskr/data/train.csv")
|
pandas.read_csv
|
import os
import pandas as pd
from groups import groups
def compute_uptake(cohort, event_col, stratification_col):
stratification_series = cohort[stratification_col]
stratification_vals = sorted(stratification_series.value_counts().index)
event_dates = cohort[cohort[event_col].notnull()][event_col]
if event_dates.empty:
return
earliest, latest = min(event_dates), max(event_dates)
index = [str(date.date()) for date in pd.date_range(earliest, latest)]
uptake = pd.DataFrame(index=index)
for stratification_val in stratification_vals:
filtered = cohort[stratification_series == stratification_val]
series =
|
pd.Series(0, index=index)
|
pandas.Series
|
import pandas as pd
import numpy as np
import csv
import os
import ODIR_evaluation
CSVFILE = "/work/ocular-dataset/full_df.csv"
FEATHERFILE = "/work/ocular-dataset/features/vgg16-imagenet.ft"
XLSXFILE = "/work/ocular-dataset/ODIR-5K/data.xlsx"
TRAIN_GT = "/work/exps/train_gt.csv"
VAL_GT = "/work/exps/val_gt.csv"
EYE_TRAIN_GT = "/work/exps/eye_labels_train.csv"
EYE_VAL_GT = "/work/exps/eye_labels_val.csv"
VAL_GT_XLSX = "/work/exps/val_gt.xlsx"
GT_HEADER = ['ID', 'N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
SEED = 13
class ODIR_Dataset:
def __init__(self):
# read file with feature vectors
df = pd.read_feather(FEATHERFILE)
feature_dict = pd.Series(df.feature_vector.values, index=df.path).to_dict()
# read file with labels from each eye
csvfile = pd.read_csv(CSVFILE)
labels_dict =
|
pd.Series(csvfile.target.values, index=csvfile.filename)
|
pandas.Series
|
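Both lookups in the ODIR row above build a pd.Series from one column indexed by another; calling .to_dict() on it (as done for feature_dict) yields a filename-to-label mapping. A minimal sketch with invented filenames:

import pandas as pd

csvfile = pd.DataFrame({
    "filename": ["left_01.jpg", "right_01.jpg"],
    "target": ["['N']", "['D']"],
})
labels_dict = pd.Series(csvfile.target.values, index=csvfile.filename).to_dict()
print(labels_dict["left_01.jpg"])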
import numpy as np
import pandas as pd
import time
from joblib import Parallel, delayed
import multiprocessing
data = pd.read_csv("./Data/train_c300_d100.csv", header=None)
data1 = pd.DataFrame({"toothed":["True","True","True","False","True","True","True","True","True","False"],
"hair":["True","True","False","True","True","True","False","False","True","False"],
"breathes":["True","True","True","True","True","True","False","True","True","True"],
"legs":["True","True","False","True","True","True","False","False","True","True"],
"species":["Mammal","Mammal","Reptile","Mammal","Mammal","Mammal","Reptile","Reptile","Mammal","Reptile"]},
columns=["toothed","hair","breathes","legs","species"])
data2 =
|
pd.DataFrame({"Outlook":["Sunny","Sunny","Overcast","Rain","Rain","Rain","Overcast","Sunny","Sunny","Rain","Sunny","Overcast","Overcast","Rain"],
"Temp":["Hot","Hot","Hot","Mild","Cool","Cool","Cool","Mild","Cool","Mild","Mild","Mild","Hot","Mild"],
"Humidity":["High","High","High","High","Normal","Normal","Normal","High","Normal","Normal","Normal","High","Normal","High"],
"Wind":["Weak","Strong","Weak","Weak","Weak","Strong","Strong","Weak","Weak","Weak","Strong","Strong","Weak","Strong"],
"Play":["No","No","Yes","Yes","Yes","No","Yes","No","Yes","Yes","Yes","Yes","Yes","No"]},
columns=["Outlook","Temp","Humidity","Wind","Play"])
|
pandas.DataFrame
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import re
def limpa_casos_h(dados:pd.DataFrame, hep:str):
'''
Funรงรฃo que recebe um dataset com dados sobre casos relacionado ร um tipo de hepatite e realiza algumas transformaรงรตes:
Retira as colunas que agrupam diversos anos, coluna 'Total' e coluna '1999-2006'
Retira a segunda linha do dataframe que se refere ร taxa de incidรชncia e, primeiramente serรฃo usados apenas os dados de casos totais para juntar os dados recentes e mais antigos
Transforma a disposiรงรฃo da tabela, transformando as colunas em apenas uma coluna('Ano') atribuindo o vรญrus da hepatite(A,B,C) como nome da coluna de valores numรฉricos
Retira a primeira linha que antes se referia ao nome da variรกvel numรฉrica('Casos')
Transforma o tipo de dados da coluna 'Ano' em inteiro e a coluna com os dados numรฉricos para float(podem haver valores nulos) para depois facilitar a manipulaรงรฃo e evitar problemas
Parรขmetros:
dados : DataFrame onde estรฃo os dados mais recentes dos casos de hepatite, tipo : pd.DataFrame
hep : o vรญrus da hepatite que o DataFrame se refere, tipo : str
Retorno:
dados : DataFrame com todas as limpezas realizadas, tipo : pd.DataFrame
'''
dados = dados.drop(columns=['Total', '1999-2006'])
dados = dados.drop(index=1)
dados = pd.melt(dados, var_name='Ano', value_name=hep)
dados = dados.drop(0)
dados[hep] = dados[hep].astype('float64')
dados['Ano'] = dados['Ano'].astype('int64')
return dados
def limpa_casos_regiao(dados_hep_A:pd.DataFrame, dados_hep_B:pd.DataFrame, dados_hep_C:pd.DataFrame, regiao:str):
'''
Funรงรฃo que recebe os DataFrames relacionados ร cada virus de uma regiรฃo especifรญca e realiza algumas transformaรงรตes:
Chama a funรงรฃo 'limpa_casos_h' para cada tipo de vรญrus para realizar uma limpeza em cada DataFrame
Junta os dados de todas os vรญrus(A, B, C)
Transforma as colunas relacionadas ao vรญrus em apenas uma coluna('virus') e atribui a 'regiao' como nome da coluna com os valore numรฉricos
Parรขmetros:
dados_hep_A : DataFrame onde estรฃo os dados mais recentes dos casos de hepatite A, tipo : pd.DataFrame
dados_hep_B : DataFrame onde estรฃo os dados mais recentes dos casos de hepatite B, tipo : pd.DataFrame
dados_hep_C : DataFrame onde estรฃo os dados mais recentes dos casos de hepatite C, tipo : pd.DataFrame
regiao : Nome da regiรฃo que o DataFrame se refere, tipo : pd.DataFrame
Retorno :
dados : retorna um Dataframe com os dados sobre casos de hepatite A, B e C de uma regiรฃo especรญfica, tipo : pd.DataFrame
'''
hep_A = limpa_casos_h(dados_hep_A, 'A')
hep_B = limpa_casos_h(dados_hep_B, 'B')
hep_C = limpa_casos_h(dados_hep_C, 'C')
dados = pd.merge(hep_A,
|
pd.merge(hep_B, hep_C)
|
pandas.merge
|
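The (translated) docstrings above describe a melt-then-merge pipeline: each per-virus table is unpivoted so the year columns become a single 'Ano' column, and the per-virus results are merged on it. A toy sketch with fabricated counts:

import pandas as pd

wide_A = pd.DataFrame({"2019": [10], "2020": [12]})
wide_B = pd.DataFrame({"2019": [3], "2020": [5]})

long_A = pd.melt(wide_A, var_name="Ano", value_name="A")
long_B = pd.melt(wide_B, var_name="Ano", value_name="B")

dados = pd.merge(long_A, long_B)  # joins on the shared 'Ano' column
print(dados)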
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', copy=copy)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', copy=copy)
else:
subarr = tools.to_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_get_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', copy=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(isinstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(isinstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_to_pydatetime(self.asi8)
def __repr__(self):
from pandas.core.format import _format_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
summary = str(self.__class__)
if len(self) > 0:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
summary += tagline % (len(self), freq, self.tz)
return summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif len(state) == 3:
# legacy format: daterange
offset = state[1]
if len(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if isinstance(other, Index):
return self.union(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shift(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if isinstance(other, Index):
return self.diff(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shift(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.astype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.astype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
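Examples
--------
Illustrative sketch::

    idx = DatetimeIndex(start='1/1/2000', periods=3, freq='D')
    idx.shift(2)   # the same dates moved forward by two periods of the index frequency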
"""
if freq is not None and freq != self.offset:
if isinstance(freq, basestring):
freq = to_offset(freq)
return
|
Index.shift(self, n, freq)
|
pandas.core.index.Index.shift
|
"""PyLabel currently supports exporting annotations in COCO, YOLO, and VOC PASCAL formats."""
import json
from typing import List
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
import xml.dom.minidom
import os
import yaml
import shutil
from pylabel.shared import _ReindexCatIds
from pathlib import PurePath, Path
class Export:
def __init__(self, dataset=None):
self.dataset = dataset
def ExportToVoc(
self,
output_path=None,
segmented_=False,
path_=False,
database_=False,
folder_=False,
occluded_=False,
):
"""Writes annotation files to disk in VOC XML format and returns path to files.
By default, tags with empty values will not be included in the XML output.
You can optionally choose to include them if they are required for your solution.
Args:
output_path (str):
This is where the annotation files will be written.
If not specified, then the path will be derived from the .path_to_annotations and
.name properties of the dataset object.
segmented_ (bool) :
Defaults to False. Set to true to include this field in the XML schema of the output files.
path_ (bool) :
Defaults to False. Set to true to include this field in the XML schema of the output files.
database_ (bool) :
Defaults to False. Set to true to include this field in the XML schema of the output files.
folder_ (bool) :
Defaults to False. Set to true to include this field in the XML schema of the output files.
occluded_ (bool) :
Defaults to False. Set to true to include this field in the XML schema of the output files.
Returns:
A list with 1 or more paths (strings) to annotations files.
Example:
>>> dataset.export.ExportToVoc()
['data/voc_annotations/000000000322.xml', ...]
"""
ds = self.dataset
if output_path == None:
output_path = ds.path_to_annotations
else:
output_path = output_path
os.makedirs(output_path, exist_ok=True)
output_file_paths = []
def voc_xml_file_creation(
data,
file_name,
output_file_path,
segmented=True,
path=True,
database=True,
folder=True,
occluded=True,
):
index = 0
df_smaller = data[data["img_filename"] == file_name].reset_index()
if len(df_smaller) == 1:
# print('test')
annotation_text_start = "<annotation>"
flder_lkp = str(df_smaller.loc[index]["img_folder"])
if folder == True and flder_lkp != "":
folder_text = "<folder>" + flder_lkp + "</folder>"
else:
folder_text = ""
filename_text = (
"<filename>"
+ str(df_smaller.loc[index]["img_filename"])
+ "</filename>"
)
pth_lkp = str(df_smaller.loc[index]["img_path"])
if path == True and pth_lkp != "":
path_text = "<path>" + pth_lkp + "</path>"
else:
path_text = ""
sources_text = ""
size_text_start = "<size>"
width_text = (
"<width>" + str(df_smaller.loc[index]["img_width"]) + "</width>"
)
height_text = (
"<height>" + str(df_smaller.loc[index]["img_height"]) + "</height>"
)
depth_text = (
"<depth>" + str(df_smaller.loc[index]["img_depth"]) + "</depth>"
)
size_text_end = "</size>"
seg_lkp = str(df_smaller.loc[index]["ann_segmented"])
if segmented == True and seg_lkp != "":
segmented_text = (
"<segmented>"
+ str(df_smaller.loc[index]["ann_segmented"])
+ "</segmented>"
)
else:
segmented_text = ""
object_text_start = "<object>"
name_text = (
"<name>" + str(df_smaller.loc[index]["cat_name"]) + "</name>"
)
pose_text = (
"<pose>" + str(df_smaller.loc[index]["ann_pose"]) + "</pose>"
)
truncated_text = (
"<truncated>"
+ str(df_smaller.loc[index]["ann_truncated"])
+ "</truncated>"
)
difficult_text = (
"<difficult>"
+ str(df_smaller.loc[index]["ann_difficult"])
+ "</difficult>"
)
occluded_text = ""
bound_box_text_start = "<bndbox>"
xmin_text = (
"<xmin>"
+ str(df_smaller.loc[index]["ann_bbox_xmin"].astype("int"))
+ "</xmin>"
)
xmax_text = (
"<xmax>"
+ str(df_smaller.loc[index]["ann_bbox_xmax"].astype("int"))
+ "</xmax>"
)
ymin_text = (
"<ymin>"
+ str(df_smaller.loc[index]["ann_bbox_ymin"].astype("int"))
+ "</ymin>"
)
ymax_text = (
"<ymax>"
+ str(df_smaller.loc[index]["ann_bbox_ymax"].astype("int"))
+ "</ymax>"
)
bound_box_text_end = "</bndbox>"
object_text_end = "</object>"
annotation_text_end = "</annotation>"
xmlstring = (
annotation_text_start
+ folder_text
+ filename_text
+ path_text
+ sources_text
+ size_text_start
+ width_text
+ height_text
+ depth_text
+ size_text_end
+ segmented_text
+ object_text_start
+ name_text
+ pose_text
+ truncated_text
+ difficult_text
+ occluded_text
+ bound_box_text_start
+ xmin_text
+ xmax_text
+ ymin_text
+ ymax_text
+ bound_box_text_end
+ object_text_end
+ annotation_text_end
)
dom = xml.dom.minidom.parseString(xmlstring)
pretty_xml_as_string = dom.toprettyxml()
with open(output_file_path, "w") as f:
f.write(pretty_xml_as_string)
return output_file_path
else:
# print('test')
annotation_text_start = "<annotation>"
flder_lkp = str(df_smaller.loc[index]["img_folder"])
if folder == True and flder_lkp != "":
folder_text = "<folder>" + flder_lkp + "</folder>"
else:
folder_text = ""
filename_text = (
"<filename>"
+ str(df_smaller.loc[index]["img_filename"])
+ "</filename>"
)
pth_lkp = str(df_smaller.loc[index]["img_path"])
if path == True and pth_lkp != "":
path_text = "<path>" + pth_lkp + "</path>"
else:
path_text = ""
# db_lkp = str(df_smaller.loc[index]['Databases'])
# if database == True and db_lkp != '':
# sources_text = '<source>'+'<database>'+ db_lkp +'</database>'+'</source>'
# else:
sources_text = ""
size_text_start = "<size>"
width_text = (
"<width>" + str(df_smaller.loc[index]["img_width"]) + "</width>"
)
height_text = (
"<height>" + str(df_smaller.loc[index]["img_height"]) + "</height>"
)
depth_text = (
"<depth>" + str(df_smaller.loc[index]["img_depth"]) + "</depth>"
)
size_text_end = "</size>"
seg_lkp = str(df_smaller.loc[index]["ann_segmented"])
if segmented == True and seg_lkp != "":
segmented_text = (
"<segmented>"
+ str(df_smaller.loc[index]["ann_segmented"])
+ "</segmented>"
)
else:
segmented_text = ""
xmlstring = (
annotation_text_start
+ folder_text
+ filename_text
+ path_text
+ sources_text
+ size_text_start
+ width_text
+ height_text
+ depth_text
+ size_text_end
+ segmented_text
)
for obj in range(len(df_smaller)):
object_text_start = "<object>"
name_text = (
"<name>" + str(df_smaller.loc[index]["cat_name"]) + "</name>"
)
pose_text = (
"<pose>" + str(df_smaller.loc[index]["ann_pose"]) + "</pose>"
)
truncated_text = (
"<truncated>"
+ str(df_smaller.loc[index]["ann_truncated"])
+ "</truncated>"
)
difficult_text = (
"<difficult>"
+ str(df_smaller.loc[index]["ann_difficult"])
+ "</difficult>"
)
# occ_lkp = str(df_smaller.loc[index]['Object Occluded'])
# if occluded==True and occ_lkp != '':
# occluded_text = '<occluded>'+occ_lkp+'</occluded>'
# else:
occluded_text = ""
bound_box_text_start = "<bndbox>"
xmin_text = (
"<xmin>"
+ str(df_smaller.loc[index]["ann_bbox_xmin"].astype("int"))
+ "</xmin>"
)
xmax_text = (
"<xmax>"
+ str(df_smaller.loc[index]["ann_bbox_xmax"].astype("int"))
+ "</xmax>"
)
ymin_text = (
"<ymin>"
+ str(df_smaller.loc[index]["ann_bbox_ymin"].astype("int"))
+ "</ymin>"
)
ymax_text = (
"<ymax>"
+ str(df_smaller.loc[index]["ann_bbox_ymax"].astype("int"))
+ "</ymax>"
)
bound_box_text_end = "</bndbox>"
object_text_end = "</object>"
annotation_text_end = "</annotation>"
index = index + 1
xmlstring = (
xmlstring
+ object_text_start
+ name_text
+ pose_text
+ truncated_text
+ difficult_text
+ occluded_text
+ bound_box_text_start
+ xmin_text
+ xmax_text
+ ymin_text
+ ymax_text
+ bound_box_text_end
+ object_text_end
)
xmlstring = xmlstring + annotation_text_end
dom = xml.dom.minidom.parseString(xmlstring)
pretty_xml_as_string = dom.toprettyxml()
with open(output_file_path, "w") as f:
f.write(pretty_xml_as_string)
return output_file_path
# Loop through all images in the dataframe and call voc_xml_file_creation for each one
for file_title in list(set(self.dataset.df.img_filename)):
file_name = Path(file_title)
file_name = str(file_name.with_suffix(".xml"))
file_path = str(Path(output_path, file_name))
voc_file_path = voc_xml_file_creation(
ds.df,
file_title,
segmented=segmented_,
path=path_,
database=database_,
folder=folder_,
occluded=occluded_,
output_file_path=file_path,
)
output_file_paths.append(voc_file_path)
return output_file_paths
def ExportToYoloV5(
self,
output_path="training/labels",
yaml_file="dataset.yaml",
copy_images=False,
use_splits=False,
cat_id_index=None,
):
"""Writes annotation files to disk in YOLOv5 format and returns the paths to files.
Args:
output_path (str):
This is where the annotation files will be written.
If not specified, then the path will be derived from the .path_to_annotations and
.name properties of the dataset object. If you are exporting images to train a model, the recommended path
to use is 'training/labels'.
yaml_file (str):
If a file name (string) is provided, a YOLOv5 YAML file will be created with entries for the files
and classes in this dataset. It will be created in the parent of the output_path directory.
The recommended name for the YAML file is 'dataset.yaml'.
copy_images (boolean):
If True, then the annotated images will be copied into a directory named 'images',
next to the labels directory. This will prepare your labels and images to be used as inputs to
train a YOLOv5 model.
use_splits (boolean):
If True, then the images and annotations will be moved into directories based on the values in the split column.
For example, if a row has the value split = "train" then the annotations for that row will be moved to directory
/train. If a YAML file is specified, then the YAML file will use the splits to specify the folders used for the
train, val, and test datasets.
cat_id_index (int):
Reindex the cat_id values so that they start from an int (usually 0 or 1) and
then increment the cat_ids continuously up to index + number of categories.
It's useful if the cat_ids are not continuous in the original dataset.
Yolo requires the set of annotations to start at 0 when training a model.
Returns:
A list with 1 or more paths (strings) to annotations files. If a YAML file is created
then the first item in the list will be the path to the YAML file.
Examples:
>>> dataset.export.ExportToYoloV5(output_path='training/labels',
>>> yaml_file='dataset.yaml', cat_id_index=0)
['training/dataset.yaml', 'training/labels/frame_0002.txt', ...]
"""
ds = self.dataset
# Inspired by https://github.com/aws-samples/groundtruth-object-detection/blob/master/create_annot.py
yolo_dataset = ds.df.copy(deep=True)
# Convert nan values in the split column to '' because those are easier to work with when building paths
yolo_dataset.split = yolo_dataset.split.fillna("")
# Create all of the paths that will be used to manage the files in this dataset
path_dict = {}
# The output path is the main path that will be used to create the other relative paths
path = PurePath(output_path)
path_dict["label_path"] = output_path
# The /images directory should be next to the /labels directory
path_dict["image_path"] = str(PurePath(path.parent, "images"))
# The root directory is in parent of the /labels and /images directories
path_dict["root_path"] = str(PurePath(path.parent))
# The YAML file should be in root directory
path_dict["yaml_path"] = str(PurePath(path_dict["root_path"], yaml_file))
# The root directory will usually be next to the yolov5 directory.
# Specify the relative path
path_dict["root_path_from_yolo_dir"] = str(PurePath("../"))
# If these default values do not match the user's environment then they can manually edit the YAML file
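# Worked example of the resulting layout with the default arguments
# (output_path='training/labels', yaml_file='dataset.yaml'):
#   label_path              -> 'training/labels'
#   image_path              -> 'training/images'
#   root_path               -> 'training'
#   yaml_path               -> 'training/dataset.yaml'
#   root_path_from_yolo_dir -> '..'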
if copy_images:
# Create the folder that the images will be copied to
Path(path_dict["image_path"]).mkdir(parents=True, exist_ok=True)
# Drop rows that are not annotated
# Note: having zero annotations can still be considered annotated
# in cases where there are no objects in the image that should be identified
yolo_dataset = yolo_dataset.loc[yolo_dataset["annotated"] == 1]
yolo_dataset["cat_id"] = (
yolo_dataset["cat_id"].astype("float").astype(
|
pd.Int32Dtype()
|
pandas.Int32Dtype
|
import pandas as pd
import json
import csv
# p_ath = '/Users/amandeep/Github/maa-analysis/MAA_Datasets/v3.2.0'
# edge_file_labels_descriptions = 'wikidata-20200803-all-edges-for-V3.2.0_KB-nodes-property-counts-with-labels-and-descriptions.tsv.gz'
# subgraph_sorted = 'wikidata_maa_subgraph_sorted_2.tsv'
# property_labels_id_file = 'property-labels-for-V3.2.0_KB_edge_id.tsv'
# qnodes_maa_labels_id_file = 'wikidata_maa_labels_edges_with_id.tsv'
class KGTKAnalysis(object):
def __init__(self, p_ath):
self.p_ath = p_ath
def convert_node_labels_to_edge(self, edge_file_labels_descriptions):
df = pd.read_csv(f'{self.p_ath}/{edge_file_labels_descriptions}', sep='\t')
df = df.drop(columns=['label', 'node2']).fillna('')
r = []
for i, row in df.iterrows():
r.append({
'node1': row['node1'],
'label': 'label',
'node2': json.dumps(self.clean_string(row['node1;label'])),
'id': f'{row["node1"]}-label-1'
})
r.append({
'node1': row['node1'],
'label': 'description',
'node2': json.dumps(self.clean_string(row['node1;description'])),
'id': f'{row["node1"]}-description-1'
})
df_r = pd.DataFrame(r)
df_r.to_csv(f'{self.p_ath}/label-descriptions-for-V3.2.0_KB-nodes.tsv', sep='\t', index=False,
quoting=csv.QUOTE_NONE)
@staticmethod
def clean_string(i_str):
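# Strip the leading quote and the trailing quote + language tag from a KGTK-style
# string literal, e.g. clean_string("'Douglas Adams'@en") -> "Douglas Adams"
# (illustrative value; any quoted literal with an @lang suffix behaves the same).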
if '@' in i_str:
i = i_str.index('@')
return i_str[1:i - 1]
return i_str
@staticmethod
def clean_string_en(i_str):
if '@' in i_str:
vals = i_str.split('@')
if vals[1] == 'en':
return json.dumps(vals[0].replace("'", ""))
return None
def find_all_properties(self, subgraph_sorted):
df = pd.read_csv(f'{self.p_ath}/{subgraph_sorted}', sep='\t').fillna('')
properties = [x for x in list(df['label'].unique()) if x.startswith('P')]
r = [{'node1': p} for p in properties]
df_r = pd.DataFrame(r)
df_r.to_csv(f'{self.p_ath}/properties-for-V3.2.0_KB-nodes.tsv', sep='\t', index=False)
def find_all_properties_for_values(self):
df =
|
pd.read_csv(f'{self.p_ath}/property-labels-for-V3.2.0_KB_edge.tsv', sep='\t')
|
pandas.read_csv
|
import numpy as np, pandas as pd, time, os, subprocess, scipy as sp, diffmap as dm
import importlib, matplotlib.pyplot as plt, sklearn.covariance as skcov
import scipy.io, sklearn.metrics
from scipy.sparse import csr_matrix
# Use two single-cell libraries to compute approximate nearest neighbors and UMAP.
import scanpy as sc, anndata as adata
itime = time.time()
# Read in GLS p-values.
pairwise_distances = np.load('GLS_p.npy')
gene_names = np.ravel(pd.read_csv('genes.txt', header=None))
# Read in clusterONE analysis results.
cone_clusts = pd.read_csv('clusterOne_clusters.tsv', sep="\t", index_col=0, header=0)
clustdata = cone_clusts.iloc[:,10:].fillna(value='')
nclusts = clustdata.shape[0]
cluster_ndces = {}
for gname in gene_names:
cluster_ndces[gname] = np.where(clustdata == gname)[0]
if len(cluster_ndces)%500 == 0:
print("{} genes finished. \tTime: {}".format(len(cluster_ndces), time.time() - itime))
clust_mmships = np.zeros((len(gene_names), nclusts))
for i in range(len(gene_names)):
clust_mmships[i, cluster_ndces[gene_names[i]]] = 1
clust_mmships = sp.sparse.csr_matrix(clust_mmships)
sp.sparse.save_npz('clusterone_memberships.npz', clust_mmships)
# Build and save pairwise Jaccard similarities between genes, according to the clusterings given.
gg_jaccsims = dm.pairwise_jaccard_graph(clust_mmships)
gm_inc = dm.build_knn(gg_jaccsims, k=10, symmetrize_type='inclusive')
# Use GLS -log(p) values between each pair of genes (the (genes x genes) matrix GLS_p) as the adjacency matrix of the GLS graph.
a = -np.log(pairwise_distances)
a[np.isinf(a)] = 0
GLS_pvals_100 = dm.build_knn(a, k=100, symmetrize_type='inclusive')
GLS_pvals_10 = dm.build_knn(GLS_pvals_100, k=10, symmetrize_type='inclusive')
sp.sparse.save_npz('GLS_pvals_10NN.npz', GLS_pvals_10)
# Construct the combined graph
frac_CO_graph = 0.99
frac_GLS_graph = 1-frac_CO_graph
CO_graph = gm_inc
GLS_graph = GLS_pvals_10
adj_mat = sp.sparse.csr_matrix(
(frac_CO_graph * CO_graph) +
(frac_GLS_graph * GLS_graph)
)
n_cmps = 100
reduced_dim = 50
sffix = "_GLS01_CO99"
vizdf_filename = "vizdf{}.csv".format(sffix)
# emap_naive, eigvals = dm.diffmap_proj(adj_mat, t=0, n_comps=reduced_dim, embed_type='naive', return_eigvals=True)
# print("Laplacian eigenmap computed. Time: {}".format(time.time() - itime))
emap_heat = dm.diffmap_proj(adj_mat, n_comps=n_cmps, n_dims=40, min_energy_frac=0.9, embed_type='diffmap', return_eigvals=False)
print("Diffusion components computed. Time: {}".format(time.time() - itime))
ann_heat = adata.AnnData(X=emap_heat[:, :40])
sc.pp.neighbors(ann_heat)
print(time.time() - itime)
sc.tl.umap(ann_heat)
print(time.time() - itime)
heat_umap = np.array(ann_heat.obsm['X_umap'])
vizdf =
|
pd.DataFrame(data=heat_umap, columns=['hUMAP_x', 'hUMAP_y'])
|
pandas.DataFrame
|
import re
import os
import string
import ipdb
import pickle
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
from sklearn.preprocessing import RobustScaler
from sklearn import linear_model
from wordcloud import WordCloud
from nltk import pos_tag, word_tokenize
import gensim.downloader as api
MIN_DF = 10
MAX_DF = 100
WORD_CLOUD_NUMBER = 50
BOW = "bow"
TFIDF = "tfidf"
WORD2VEC = "word2vec"
SKIPTHOUGHT = "skipThought"
def select_by_pos_tag(sentence, tags):
word_tokens = word_tokenize(sentence)
tagged_word_token = pos_tag(word_tokens)
selected_words = [word for word, tag in tagged_word_token if tag in tags]
return ' '.join(selected_words)
def clean_sentence(s):
s = re.sub("\n", " ", s)
s = re.sub("[" + string.punctuation + "]", " ", s)
s = re.sub("\?", " ", s)
s = re.sub("[0-9]+", " ", s)
s = re.sub(" +", " ", s)
return s.strip()
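# Illustrative example: clean_sentence("U.S. stocks rallied, up 2%!\n") -> "U S stocks rallied up"
# (newlines, punctuation and digits become spaces, then runs of spaces collapse).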
def generate_bag_of_words(train, test, feature_args):
vectorizer = CountVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
train_bag_of_words = normalize(train_bag_of_words)
test_bag_of_words = normalize(test_bag_of_words)
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def generate_tfidf(train, test, feature_args):
vectorizer = TfidfVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def average_word2vec(sentence, model):
sentence = clean_sentence(sentence)
word2vecs = []
for word in sentence.split(" "):
word = word.lower()
if word in model:
word2vecs.append(model[word])
return pd.Series(np.average(word2vecs, axis=0))
def generate_word2vec(train, test, feature_args):
path = 'word2vec/' + feature_args['model']
word2vec = pd.read_csv(path)
return word2vec[train.index[0]:test.index[-1] + 1], None
def generate_skip_thoughts(train, test, feature_args):
skip_thoughts = pd.read_csv(feature_args['path'])
return skip_thoughts[train.index[0]:test.index[-1] + 1], None
def generate_price_features(data):
price_feature_name = ['previous_price_{0:d}'.format(d) for d in range(1, 6)]
price_features = data[price_feature_name].values
return price_features
def generate_classification_label(data):
y = np.zeros(data.shape[0], np.float)
y[data['predicted_price'] > data['price']] = 1.0
return y
def generate_regression_label(data):
return (data['predicted_price'] - data['price']).values
def evaluate_return(open_price, y_hat, y):
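# Simple long-only backtest: buy whenever the prediction is non-negative
# (the threshold 0.0 * price is just zero) and add the actual price change
# `actual` to the cumulative revenue; `buy_action` records the indices of the
# periods in which we bought.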
revenue = 0
index = 0
buy_action = []
for price, predict, actual in zip(open_price, y_hat, y):
if predict >= 0.0 * price:
revenue += actual
buy_action.append(index)
index += 1
return revenue, buy_action
def run(data, split, feature_args, exp_label):
published_time = pd.to_datetime(data['published_time'])
y = generate_regression_label(data)
y_class = generate_classification_label(data)
X_price = data['price'].values
record = {
'classification':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'regression':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'pnl':{
'train':pd.DataFrame(),
'test':pd.DataFrame()
},
'buy_actions':{
},
'feature_size':{
}
}
feature_list = [BOW, TFIDF, WORD2VEC, SKIPTHOUGHT]
feature_functions = {
BOW:generate_bag_of_words,
TFIDF:generate_tfidf,
WORD2VEC:generate_word2vec,
SKIPTHOUGHT:generate_skip_thoughts
}
fold_index = 0
tscv = TimeSeriesSplit(n_splits=split)
for train_index, test_index in tscv.split(data.values):
fold_index += 1
start_index = data.index[train_index[0]]
split_index = data.index[test_index[0]]
end_index = data.index[test_index[-1]] + 1
train = data[start_index:split_index]
test = data[split_index:end_index]
X_list = []
for feature_name in feature_list:
if feature_name in feature_args:
features, vectorizer = feature_functions[feature_name](train, test, feature_args[feature_name])
X_list.append(features)
if len(X_list) > 1:
array_list = [features.values for features in X_list]
X = np.concatenate(array_list, axis=1)
else:
X = X_list[0].values
feature_size = X.shape[1]
print("feature size:", feature_size)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
y_class_train, y_class_test = y_class[train_index], y_class[test_index]
X_train_price = X_price[train_index]
X_test_price = X_price[test_index]
# Normalization and Scaling
scaler = RobustScaler()
scaler.fit(y_train.reshape(-1, 1))
y_train_t = scaler.transform(y_train.reshape(-1, 1)).reshape(-1, )
x_train_t = X_train
x_test_t = X_test
# Modeling
classifiers_dict = {
'Logistic Regression':LogisticRegression(penalty='l2', C=0.05, verbose=0, max_iter=10000)
}
regressors_dict = {
'SVR':SVR(kernel='linear', C=1.0, verbose=0),
'Ridge Regression':linear_model.Ridge(alpha=5.0)
}
train_class_err = {}
test_class_err = {}
train_regre_err = {}
test_regre_err = {}
train_pnl_err = {}
test_pnl_err = {}
test_buy_times = []
for label, clf in classifiers_dict.items():
clf.fit(x_train_t, y_class_train)
y_class_train_pred = clf.predict(x_train_t)
y_class_test_pred = clf.predict(x_test_t)
# classification error
train_acc = accuracy_score(y_class_train, y_class_train_pred)
test_acc = accuracy_score(y_class_test, y_class_test_pred)
train_class_err[label] = train_acc
test_class_err[label] = test_acc
# PNL error
train_return, train_buy_action = evaluate_return(X_train_price, y_class_train_pred, y_train)
test_return, test_buy_action = evaluate_return(X_test_price, y_class_test_pred, y_test)
train_pnl_err[label] = train_return
test_pnl_err[label] = test_return
if label not in record['buy_actions']:
record['buy_actions'][label] = []
for action_time in test_buy_action:
record['buy_actions'][label].append(action_time + len(X_train))
for label, clf in regressors_dict.items():
clf.fit(x_train_t, y_train_t)
y_train_pred = clf.predict(x_train_t)
y_test_pred = clf.predict(x_test_t)
# classification error
y_class_train_pred = np.zeros(y_train_pred.shape[0], np.float)
y_class_train_pred[y_train_pred >= 0.0] = 1.0
y_class_test_pred = np.zeros(y_test_pred.shape[0], np.float)
y_class_test_pred[y_test_pred >= 0.0] = 1.0
train_acc = accuracy_score(y_class_train, y_class_train_pred)
test_acc = accuracy_score(y_class_test, y_class_test_pred)
train_class_err[label] = train_acc
test_class_err[label] = test_acc
# regression error
y_train_pred = scaler.inverse_transform(y_train_pred.reshape(-1, 1)).reshape(-1, )
y_test_pred = scaler.inverse_transform(y_test_pred.reshape(-1, 1)).reshape(-1, )
train_mse = mean_squared_error(y_train, y_train_pred)
test_mse = mean_squared_error(y_test, y_test_pred)
train_regre_err[label] = train_mse
test_regre_err[label] = test_mse
# PNL error
train_return, train_buy_action = evaluate_return(X_train_price, y_train_pred, y_train)
test_return, test_buy_action = evaluate_return(X_test_price, y_test_pred, y_test)
train_pnl_err[label] = train_return
test_pnl_err[label] = test_return
if label not in record['buy_actions']:
record['buy_actions'][label] = []
for action_time in test_buy_action:
record['buy_actions'][label].append(action_time + len(X_train))
record['classification']['train'] = record['classification']['train'].append(pd.Series(data=train_class_err), ignore_index=True)
record['classification']['test'] = record['classification']['test'].append(pd.Series(data=test_class_err), ignore_index=True)
record['regression']['train'] = record['regression']['train'].append(pd.Series(data=train_regre_err), ignore_index=True)
record['regression']['test'] = record['regression']['test'].append(pd.Series(data=test_regre_err), ignore_index=True)
record['pnl']['train'] = record['pnl']['train'].append(pd.Series(data=train_pnl_err), ignore_index=True)
record['pnl']['test'] = record['pnl']['test'].append(pd.Series(data=test_pnl_err), ignore_index=True)
record['feature_size'][str(fold_index)] = feature_size
# Words analysis
if vectorizer is not None and fold_index == split:
plot_word_coef_in_model_dict(classifiers_dict, vectorizer, exp_label)
plot_word_coef_in_model_dict(regressors_dict, vectorizer, exp_label)
bayes_result = analysis_bay(X_train, y_class_train, ['negative', 'positive'], vectorizer)
plot_word_analysis_result(bayes_result, 'bayes', exp_label)
return record
def plot_word_coef_in_model_dict(model_dict, vectorizer, exp_label):
for clf_name, clf in model_dict.items():
coef = clf.coef_
if len(coef) == 1:
coef = coef[0]
result = analysis(coef, vectorizer)
plot_word_analysis_result(result, clf_name, exp_label)
def plot_word_analysis_result(result, model, exp_label):
model_name = re.sub(' +', '_', model.lower())
exp_label = re.sub(' +', '_', exp_label.lower())
for class_name, freq in result.items():
label = '{0:s}_{1:s}_{2:s}'.format(model_name, class_name, exp_label)
plot_word_cloud(label, freq)
def plot_word_cloud(label, freq):
wordcloud = WordCloud()
wordcloud.generate_from_frequencies(frequencies=freq)
plt.clf()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
path = "picture/wordCloud/{0:s}.png".format(label)
plt.savefig(path)
def analysis_bay(X, y, class_labels, vectorizer):
clf = MultinomialNB()
clf.fit(X, y)
word_list = vectorizer.get_feature_names()
result = {}
for class_index, class_prob_array in enumerate(clf.feature_log_prob_):
result[class_labels[class_index]] = {}
maximum_prob_index = class_prob_array.argsort()[-WORD_CLOUD_NUMBER:][::-1]
for index in maximum_prob_index:
if index < len(word_list):
word = word_list[index]
result[class_labels[class_index]][word] = class_prob_array[index]
return result
def analysis(clf_coef, vectorizer):
result = {
'positive':{},
'negative':{}
}
maximum_weight_index = clf_coef.argsort()[-WORD_CLOUD_NUMBER:][::-1]
minimum_weight_index = clf_coef.argsort()[:WORD_CLOUD_NUMBER]
word_list = vectorizer.get_feature_names()
for positive_index, negative_index in zip(maximum_weight_index, minimum_weight_index):
positive_weight = clf_coef[positive_index]
negative_weight = clf_coef[negative_index]
if positive_weight > 0 and positive_index < len(word_list):
positive_term = word_list[positive_index]
result['positive'][positive_term] = positive_weight
if negative_weight < 0 and negative_index < len(word_list):
negative_term = word_list[negative_index]
result['negative'][negative_term] = -negative_weight
return result
def plot_record(record, label, selected_tasks):
measure_map = {
'classification':'Accuracy',
'regression':'MSE',
'pnl':'Dollar'
}
rcParams.update({'figure.autolayout': True})
for task, item in record.items():
if task in selected_tasks:
for sample, dataframe in item.items():
label_filename = re.sub(' +', '_', label)
title = '{0:s} task on {1:s} dataset {2:s}'.format(task, sample, label)
path = 'picture/experiment/{0:s}_{1:s}_{2:s}.png'.format(task, sample, label_filename)
plt.cla()
ax = dataframe.plot(kind='line', style='.-', xticks=range(len(dataframe)), title=title, legend=True)
ax.set(xlabel='Fold number', ylabel=measure_map[task])
plt.savefig(path)
def do_exp(data, num_split, feature_args, data_label, feature_label, redo=False):
exp_label = "({0:s}, {1:s})".format(data_label, feature_label)
print(exp_label)
print(feature_args)
file_name_label = re.sub(' +', '_', exp_label)
record_file_path = 'result/record_{0:s}.p'.format(file_name_label)
if not redo and os.path.isfile(record_file_path):
record = pickle.load(open(record_file_path, 'rb'))
return record
selected_tasks = ['classification', 'regression', 'pnl']
record = run(data, num_split, feature_args, exp_label)
plot_record(record, exp_label, selected_tasks)
pickle.dump(record, open(record_file_path, 'wb'))
return record
def plot_records(records, model, exp_label):
task_list = ['classification', 'regression', 'pnl']
merged_record = {}
for task in task_list:
train_record_list = []
test_record_list = []
for feature_label, record in records.items():
if model in record[task]['train']:
train_record_list.append(record[task]['train'][model].rename(feature_label))
test_record_list.append(record[task]['test'][model].rename(feature_label))
if len(train_record_list) > 0:
merged_record[task] = {}
merged_record[task]['train'] = pd.concat(train_record_list, axis=1)
merged_record[task]['test'] = pd.concat(test_record_list, axis=1)
model_file_name = re.sub(' +', '_', model.lower())
plot_record(merged_record, 'comparison_{0:s}_{1:s}'.format(model_file_name, exp_label), task_list)
def preprocess_news_df(news_df):
null_text_index = news_df[news_df['text'].isnull()].index
news_df.drop(null_text_index, inplace=True)
news_df['published_time'] =
|
pd.to_datetime(news_df['published_time'])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
@author: HYPJUDY 2019/4/15
https://github.com/HYPJUDY
Decoupling Localization and Classification in Single Shot Temporal Action Detection
-----------------------------------------------------------------------------------
Operations used by Decouple-SSAD
"""
import pandas as pd
import pandas
import numpy as np
import numpy
import os
import tensorflow as tf
from os.path import join
#################################### TRAIN & TEST #####################################
def abs_smooth(x):
"""Smoothed absolute function. Useful to compute an L1 smooth error.
Define as:
x^2 / 2 if abs(x) < 1
abs(x) - 0.5 if abs(x) > 1
We use here a differentiable definition using min(abs(x), 1) and abs(x). Clearly
not optimal, but good enough for our purpose!
"""
absx = tf.abs(x)
minx = tf.minimum(absx, 1)
r = 0.5 * ((absx - 1) * minx + absx)
return r
def jaccard_with_anchors(anchors_min, anchors_max, len_anchors, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
int_xmin = tf.maximum(anchors_min, box_min)
int_xmax = tf.minimum(anchors_max, box_max)
inter_len = tf.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
jaccard = tf.div(inter_len, union_len)
return jaccard
def loop_condition(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
r = tf.less(idx, tf.shape(b_glabels))
return r[0]
def loop_body(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
num_class = b_match_labels.get_shape().as_list()[-1]
label = b_glabels[idx][0:num_class]
box_min = b_gbboxes[idx, 0]
box_max = b_gbboxes[idx, 1]
# ground truth
box_x = (box_max + box_min) / 2
box_w = (box_max - box_min)
# predict
anchors_min = b_anchors_rx - b_anchors_rw / 2
anchors_max = b_anchors_rx + b_anchors_rw / 2
len_anchors = anchors_max - anchors_min
jaccards = jaccard_with_anchors(anchors_min, anchors_max, len_anchors, box_min, box_max)
# jaccards > b_match_scores > -0.5 & jaccards > matching_threshold
mask = tf.greater(jaccards, b_match_scores)
matching_threshold = 0.5
mask = tf.logical_and(mask, tf.greater(jaccards, matching_threshold))
mask = tf.logical_and(mask, b_match_scores > -0.5)
imask = tf.cast(mask, tf.int32)
fmask = tf.cast(mask, tf.float32)
# Update values using mask.
# if overlap enough, update b_match_* with gt, otherwise not update
b_match_x = fmask * box_x + (1 - fmask) * b_match_x
b_match_w = fmask * box_w + (1 - fmask) * b_match_w
ref_label = tf.zeros(tf.shape(b_match_labels), dtype=tf.int32)
ref_label = ref_label + label
b_match_labels = tf.matmul(tf.diag(imask), ref_label) + tf.matmul(tf.diag(1 - imask), b_match_labels)
b_match_scores = tf.maximum(jaccards, b_match_scores)
return [idx + 1, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores]
def default_box(layer_steps, scale, a_ratios):
width_set = [scale * ratio for ratio in a_ratios]
center_set = [1. / layer_steps * i + 0.5 / layer_steps for i in range(layer_steps)]
width_default = []
center_default = []
for i in range(layer_steps):
for j in range(len(a_ratios)):
width_default.append(width_set[j])
center_default.append(center_set[i])
width_default = np.array(width_default)
center_default = np.array(center_default)
return width_default, center_default
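# Worked example: default_box(layer_steps=4, scale=0.25, a_ratios=[0.5, 1, 2]) returns
# widths cycling through [0.125, 0.25, 0.5] and centers [0.125, 0.375, 0.625, 0.875],
# each center repeated once per aspect ratio (12 default boxes in total).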
def anchor_box_adjust(anchors, config, layer_name, pre_rx=None, pre_rw=None):
if pre_rx == None:
dboxes_w, dboxes_x = default_box(config.num_anchors[layer_name],
config.scale[layer_name], config.aspect_ratios[layer_name])
else:
dboxes_x = pre_rx
dboxes_w = pre_rw
anchors_conf = anchors[:, :, -3]
# anchors_conf=tf.nn.sigmoid(anchors_conf)
anchors_rx = anchors[:, :, -2]
anchors_rw = anchors[:, :, -1]
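# SSD-style decoding of the regression outputs below: the predicted center offset is
# scaled by the default-box width (with a 0.1 variance factor) and the predicted width
# is an exponential rescaling of the default-box width.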
anchors_rx = anchors_rx * dboxes_w * 0.1 + dboxes_x
anchors_rw = tf.exp(0.1 * anchors_rw) * dboxes_w
# anchors_class=anchors[:,:,:config.num_classes]
num_class = anchors.get_shape().as_list()[-1] - 3
anchors_class = anchors[:, :, :num_class]
return anchors_class, anchors_conf, anchors_rx, anchors_rw
# This function is mainly used to produce the matched ground truth for
# each adjusted anchor, one prediction at a time.
# The matched ground truth may be positive/negative;
# the matched x, w, labels, scores all correspond to this anchor.
def anchor_bboxes_encode(anchors, glabels, gbboxes, Index, config, layer_name, pre_rx=None, pre_rw=None):
num_anchors = config.num_anchors[layer_name]
num_dbox = config.num_dbox[layer_name]
# num_classes = config.num_classes
num_classes = anchors.get_shape().as_list()[-1] - 3
dtype = tf.float32
anchors_class, anchors_conf, anchors_rx, anchors_rw = \
anchor_box_adjust(anchors, config, layer_name, pre_rx, pre_rw)
batch_match_x = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_w = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_scores = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_labels = tf.reshape(tf.constant([], dtype=tf.int32),
[-1, num_anchors * num_dbox, num_classes])
for i in range(config.batch_size):
shape = (num_anchors * num_dbox)
match_x = tf.zeros(shape, dtype)
match_w = tf.zeros(shape, dtype)
match_scores = tf.zeros(shape, dtype)
match_labels_other = tf.ones((num_anchors * num_dbox, 1), dtype=tf.int32)
match_labels_class = tf.zeros((num_anchors * num_dbox, num_classes - 1), dtype=tf.int32)
match_labels = tf.concat([match_labels_other, match_labels_class], axis=-1)
b_anchors_rx = anchors_rx[i]
b_anchors_rw = anchors_rw[i]
b_glabels = glabels[Index[i]:Index[i + 1]]
b_gbboxes = gbboxes[Index[i]:Index[i + 1]]
idx = 0
[idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores] = \
tf.while_loop(loop_condition, loop_body,
[idx, b_anchors_rx, b_anchors_rw,
b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores])
match_x = tf.reshape(match_x, [-1, num_anchors * num_dbox])
batch_match_x = tf.concat([batch_match_x, match_x], axis=0)
match_w = tf.reshape(match_w, [-1, num_anchors * num_dbox])
batch_match_w = tf.concat([batch_match_w, match_w], axis=0)
match_scores = tf.reshape(match_scores, [-1, num_anchors * num_dbox])
batch_match_scores = tf.concat([batch_match_scores, match_scores], axis=0)
match_labels = tf.reshape(match_labels, [-1, num_anchors * num_dbox, num_classes])
batch_match_labels = tf.concat([batch_match_labels, match_labels], axis=0)
return [batch_match_x, batch_match_w, batch_match_labels, batch_match_scores,
anchors_class, anchors_conf, anchors_rx, anchors_rw]
def in_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.layers.conv1d(inputs=layer, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=None, kernel_initializer=initer)
return out
def out_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.nn.relu(layer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
return out
############################ TRAIN and TEST NETWORK LAYER ###############################
def get_trainable_variables():
trainable_variables_scope = [a.name for a in tf.trainable_variables()]
trainable_variables_list = tf.trainable_variables()
trainable_variables = []
for i in range(len(trainable_variables_scope)):
if ("base_feature_network" in trainable_variables_scope[i]) or \
("anchor_layer" in trainable_variables_scope[i]) or \
("predict_layer" in trainable_variables_scope[i]):
trainable_variables.append(trainable_variables_list[i])
return trainable_variables
def base_feature_network(X, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("base_feature_network" + mode):
# ----------------------- Base layers ----------------------
# [batch_size, 128, 1024]
net = tf.layers.conv1d(inputs=X, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 128, 512]
net = tf.layers.max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 64, 512]
net = tf.layers.conv1d(inputs=net, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 64, 512]
net = tf.layers.max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 32, 512]
return net
def main_anchor_layer(net, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("main_anchor_layer" + mode):
# ----------------------- Anchor layers ----------------------
MAL1 = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 16, 1024]
MAL2 = tf.layers.conv1d(inputs=MAL1, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 8, 1024]
MAL3 = tf.layers.conv1d(inputs=MAL2, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 4, 1024]
return MAL1, MAL2, MAL3
def branch_anchor_layer(MALs, name=''):
MAL1, MAL2, MAL3 = MALs
with tf.variable_scope("branch_anchor_layer" + name):
BAL3 = out_conv(in_conv(MAL3)) # [batch_size, 4, 1024]
BAL3_expd = tf.expand_dims(BAL3, 1) # [batch_size, 1, 4, 1024]
BAL3_de = tf.layers.conv2d_transpose(BAL3_expd, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 8, 1024]
BAL3_up = tf.reduce_sum(BAL3_de, [1]) # [batch_size, 8, 1024]
MAL2_in_conv = in_conv(MAL2)
BAL2 = out_conv((MAL2_in_conv * 2 + BAL3_up) / 3) # [batch_size, 8, 1024]
MAL2_expd = tf.expand_dims(BAL2, 1) # [batch_size, 1, 8, 1024]
MAL2_de = tf.layers.conv2d_transpose(MAL2_expd, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 16, 1024]
MAL2_up = tf.reduce_sum(MAL2_de, [1]) # [batch_size, 16, 1024]
MAL1_in_conv = in_conv(MAL1)
BAL1 = out_conv((MAL1_in_conv * 2 + MAL2_up) / 3) # [batch_size, 16, 1024]
return BAL1, BAL2, BAL3
# action or not + conf + location (center&width)
# Anchor Binary Classification and Regression
def biClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
with tf.variable_scope("biClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (1 + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (1 + 3)])
return anchor
# action or not + class score + conf + location (center&width)
# Action Multi-Class Classification and Regression
def mulClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
ncls = config.num_classes
with tf.variable_scope("mulClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (ncls + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (ncls + 3)])
return anchor
#################################### TRAIN LOSS #####################################
def loss_function(anchors_class, anchors_conf, anchors_xmin, anchors_xmax,
match_x, match_w, match_labels, match_scores, config):
match_xmin = match_x - match_w / 2
match_xmax = match_x + match_w / 2
pmask = tf.cast(match_scores > 0.5, dtype=tf.float32)
num_positive = tf.reduce_sum(pmask)
num_entries = tf.cast(tf.size(match_scores), dtype=tf.float32)
hmask = match_scores < 0.5
hmask = tf.logical_and(hmask, anchors_conf > 0.5)
hmask = tf.cast(hmask, dtype=tf.float32)
num_hard = tf.reduce_sum(hmask)
# the meaning of r_negative: the ratio of anchors we need to choose from the easy negative anchors
# If we have `num_positive` positive anchors in the training data,
# then we only need `config.negative_ratio*num_positive` negative anchors in total
# r_negative = (number of easy negatives we still need) / (number of all easy negatives)
# the meaning of easy negative: all - positive - hard negative
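# Worked example (made-up numbers): with negative_ratio=3, num_positive=10, num_hard=5
# and num_entries=1000, we want 3*10=30 negatives in total, 5 of which are hard, so we
# still need 25 easy negatives out of 1000-10-5=985 candidates:
# r_negative = (3 - 5/10) * 10 / 985 = 25/985 ~ 0.025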
r_negative = (config.negative_ratio - num_hard / num_positive) * num_positive / (
num_entries - num_positive - num_hard)
r_negative = tf.minimum(r_negative, 1)
nmask = tf.random_uniform(tf.shape(pmask), dtype=tf.float32)
nmask = nmask * (1. - pmask)
nmask = nmask * (1. - hmask)
nmask = tf.cast(nmask > (1. - r_negative), dtype=tf.float32)
# class_loss
weights = pmask + nmask + hmask
class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=anchors_class, labels=match_labels)
class_loss = tf.losses.compute_weighted_loss(class_loss, weights)
# correct_pred = tf.equal(tf.argmax(anchors_class, 2), tf.argmax(match_labels, 2))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
# loc_loss
weights = pmask
loc_loss = abs_smooth(anchors_xmin - match_xmin) + abs_smooth(anchors_xmax - match_xmax)
loc_loss = tf.losses.compute_weighted_loss(loc_loss, weights)
# conf loss
weights = pmask + nmask + hmask
# match_scores is from jaccard_with_anchors
conf_loss = abs_smooth(match_scores - anchors_conf)
conf_loss = tf.losses.compute_weighted_loss(conf_loss, weights)
return class_loss, loc_loss, conf_loss
#################################### POST PROCESS #####################################
def min_max_norm(X):
# map [0,1] -> [0.5,0.73] (almost linearly) ([-1, 0] -> [0.26, 0.5])
return 1.0 / (1.0 + np.exp(-1.0 * X))
def post_process(df, config):
class_scores_class = [(df['score_' + str(i)]).values[:].tolist() for i in range(21)]
class_scores_seg = [[class_scores_class[j][i] for j in range(21)] for i in range(len(df))]
class_real = [0] + config.class_real # num_classes + 1
# save the top 2 or 3 score element
# append the largest score element
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf1 = pd.DataFrame()
resultDf1['out_type'] = class_type_list
resultDf1['out_score'] = class_score_list
resultDf1['start'] = df.xmin.values[:]
resultDf1['end'] = df.xmax.values[:]
# append the second largest score element
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_score[class_score.index(max(class_score))] = 0
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf2 = pd.DataFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = df.xmin.values[:]
resultDf2['end'] = df.xmax.values[:]
resultDf1 = pd.concat([resultDf1, resultDf2])
# # append the third largest score element (improve little and slow)
class_type_list = []
class_score_list = []
for i in range(len(df)):
class_score = np.array(class_scores_seg[i][1:]) * min_max_norm(df.conf.values[i])
class_score = class_score.tolist()
class_score[class_score.index(max(class_score))] = 0
class_score[class_score.index(max(class_score))] = 0
class_type = class_real[class_score.index(max(class_score)) + 1]
class_type_list.append(class_type)
class_score_list.append(max(class_score))
resultDf2 = pd.DataFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = df.xmin.values[:]
resultDf2['end'] = df.xmax.values[:]
resultDf1 = pd.concat([resultDf1, resultDf2])
# resultDf1=resultDf1[resultDf1.out_score>0.05]
resultDf1['video_name'] = [df['video_name'].values[0] for _ in range(len(resultDf1))]
return resultDf1
def temporal_nms(config, dfNMS, filename, videoname):
nms_threshold = config.nms_threshold
fo = open(filename, 'a')
typeSet = list(set(dfNMS.out_type.values[:]))
for t in typeSet:
tdf = dfNMS[dfNMS.out_type == t]
t1 = np.array(tdf.start.values[:])
t2 = np.array(tdf.end.values[:])
scores = np.array(tdf.out_score.values[:])
ttype = list(tdf.out_type.values[:])
durations = t2 - t1
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
tt1 = np.maximum(t1[i], t1[order[1:]])
tt2 = np.minimum(t2[i], t2[order[1:]])
intersection = tt2 - tt1
IoU = intersection / (durations[i] + durations[order[1:]] - intersection).astype(float)
inds = np.where(IoU <= nms_threshold)[0]
order = order[inds + 1]
for idx in keep:
# class_real: do not have class 0 (ambiguous) -> remove all ambiguous class
if ttype[idx] in config.class_real:
if videoname in ["video_test_0001255", "video_test_0001058",
"video_test_0001459", "video_test_0001195", "video_test_0000950"]: # 25fps
strout = "%s\t%.3f\t%.3f\t%d\t%.4f\n" % (videoname, float(t1[idx]) / 25,
float(t2[idx]) / 25, ttype[idx], scores[idx])
elif videoname == "video_test_0001207": # 24fps
strout = "%s\t%.3f\t%.3f\t%d\t%.4f\n" % (videoname, float(t1[idx]) / 24,
float(t2[idx]) / 24, ttype[idx], scores[idx])
else: # most videos are 30fps
strout = "%s\t%.3f\t%.3f\t%d\t%.4f\n" % (videoname, float(t1[idx]) / 30,
float(t2[idx]) / 30, ttype[idx], scores[idx])
fo.write(strout)
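# Minimal self-contained sketch of the 1-D NMS logic in temporal_nms above, run on
# three toy segments instead of a DataFrame; segment boundaries, scores and the
# threshold are made up for illustration only.
def _demo_temporal_nms(nms_threshold=0.2):
    t1 = np.array([0.0, 1.0, 10.0])
    t2 = np.array([5.0, 6.0, 15.0])
    scores = np.array([0.9, 0.8, 0.7])
    durations = t2 - t1
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        tt1 = np.maximum(t1[i], t1[order[1:]])
        tt2 = np.minimum(t2[i], t2[order[1:]])
        intersection = np.maximum(tt2 - tt1, 0.0)
        IoU = intersection / (durations[i] + durations[order[1:]] - intersection)
        order = order[np.where(IoU <= nms_threshold)[0] + 1]
    return keep  # -> [0, 2]: the heavily overlapping second segment is suppressed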
def fuse_two_stream(spatial_path, temporal_path):
temporal_df = pd.read_csv(temporal_path)
spatial_df = pd.read_csv(spatial_path)
out_df = temporal_df
out_df['conf'] = temporal_df.conf.values[:] * 2 / 3 + spatial_df.conf.values * 1 / 3
out_df['xmin'] = temporal_df.xmin.values[:] * 2 / 3 + spatial_df.xmin.values * 1 / 3
out_df['xmax'] = temporal_df.xmax.values[:] * 2 / 3 + spatial_df.xmax.values * 1 / 3
    # fuse the 21 per-class scores with the same 2:1 temporal/spatial weighting
    for i in range(21):
        col = 'score_' + str(i)
        out_df[col] = temporal_df[col].values[:] * 2 / 3 + spatial_df[col].values * 1 / 3
out_df = out_df[out_df.score_0 < 0.99]
# outDf.to_csv(fusePath, index=False)
return out_df
def result_process(batch_win_info, batch_result_class,
batch_result_conf, batch_result_xmin, batch_result_xmax, config, batch_idx):
out_df = pandas.DataFrame(columns=config.outdf_columns)
for j in range(config.batch_size):
tmp_df = pandas.DataFrame()
win_info = batch_win_info[batch_idx][j] # one sample in window_info.log
# the following four attributes are produced by the above one
# winInfo sample, 108 kinds of anchors are the
# combination of different layer types and scale ratios
result_class = batch_result_class[batch_idx][j]
result_xmin = batch_result_xmin[batch_idx][j]
result_xmax = batch_result_xmax[batch_idx][j]
result_conf = batch_result_conf[batch_idx][j]
num_box = len(result_class) # (16*5+8*5+4*5) = sum of num_anchors*num_dbox
video_name = win_info[1]
tmp_df['video_name'] = [video_name] * num_box
tmp_df['start'] = [int(win_info[0])] * num_box
tmp_df['conf'] = result_conf
tmp_df['xmin'] = result_xmin
tmp_df['xmax'] = result_xmax
tmp_df.xmin = numpy.maximum(tmp_df.xmin, 0)
tmp_df.xmax = numpy.minimum(tmp_df.xmax, config.window_size)
tmp_df.xmin = tmp_df.xmin + tmp_df.start
tmp_df.xmax = tmp_df.xmax + tmp_df.start
for cidx in range(config.num_classes):
tmp_df['score_' + str(cidx)] = result_class[:, cidx]
if not config.save_predict_result:
            # filter tmp_df down from 108 anchor rows to roughly 20-40
tmp_df = tmp_df[tmp_df.score_0 < config.filter_neg_threshold]
out_df =
|
pandas.concat([out_df, tmp_df])
|
pandas.concat
|
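# A minimal, self-contained illustration of the pandas.concat call that completes
# the row above, with toy frames standing in for out_df / tmp_df.
def _demo_pandas_concat():
    import pandas
    out_df = pandas.DataFrame({'xmin': [0.0], 'xmax': [1.0]})
    tmp_df = pandas.DataFrame({'xmin': [2.0], 'xmax': [3.0]})
    # row-wise append; pass ignore_index=True if a fresh RangeIndex is wanted
    return pandas.concat([out_df, tmp_df])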
import logging
import pandas
import os
import numpy
from scipy import stats
from .. import Constants
from .. import Utilities
from .. import MatrixManager
from ..PredictionModel import WDBQF, WDBEQF, load_model, dataframe_from_weight_data
from ..misc import DataFrameStreamer
from . import AssociationCalculation
class SimpleContext(AssociationCalculation.Context):
def __init__(self, gwas, model, covariance):
self.gwas = gwas
self.model = model
self.covariance = covariance
def get_weights(self, gene):
w = self.model.weights
w = w[w.gene == gene]
return w
def get_covariance(self, gene, snps):
return self.covariance.get(gene, snps, strict_whitelist=False)
def get_n_in_covariance(self, gene):
return self.covariance.n_ids(gene)
def get_gwas(self, snps):
g = self.gwas
g = g[g[Constants.SNP].isin(snps)]
return g
def get_model_snps(self):
return set(self.model.weights.rsid)
def get_data_intersection(self):
return _data_intersection(self.model, self.gwas)
def provide_calculation(self, gene):
w = self.get_weights(gene)
gwas = self.get_gwas(w[WDBQF.K_RSID].values)
i = pandas.merge(w, gwas, left_on="rsid", right_on="snp")
if not Constants.BETA in i: i[Constants.BETA] = None
i = i[[Constants.SNP, WDBQF.K_WEIGHT, Constants.ZSCORE, Constants.BETA]]
snps, cov = self.get_covariance(gene, i[Constants.SNP].values)
# fast subsetting and aligning
d_columns = i.columns.values
if snps is not None and len(snps):
d = {x[0]: x for x in i.values}
d = [d[snp] for snp in snps]
d = list(zip(*d))
d = {d_columns[i]:d[i] for i in range(0, len(d_columns))}
i = pandas.DataFrame(d)
else:
i = pandas.DataFrame(columns=d_columns)
return len(w.weight), i, cov, snps
def get_model_info(self):
return self.model.extra
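# Sketch of the "fast subsetting and aligning" trick used in provide_calculation
# above: rows are re-ordered to follow a given snp order via a dict lookup instead
# of a merge. Toy frame only; column names here are illustrative.
def _demo_align_rows_to_snp_order():
    i = pandas.DataFrame({"snp": ["rs3", "rs1", "rs2"], "weight": [0.3, 0.1, 0.2]})
    snps = ["rs1", "rs2", "rs3"]           # order imposed by the covariance matrix
    d_columns = i.columns.values
    d = {row[0]: row for row in i.values}  # first column (snp) -> full row
    d = [d[snp] for snp in snps]           # rows rearranged into covariance order
    d = list(zip(*d))                      # back to column-wise tuples
    return pandas.DataFrame({d_columns[k]: d[k] for k in range(len(d_columns))})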
class OptimizedContext(SimpleContext):
def __init__(self, gwas, model, covariance, MAX_R):
self.covariance = covariance
self.genes, self.weight_data, self.snps_in_model = _prepare_weight_data(model, MAX_R)
self.gwas_data = _prepare_gwas_data(gwas)
self.extra = model.extra
self.last_gene = None
self.data_cache = None
self.pedantic = MAX_R is None
def _get_weights(self, gene):
w = self.weight_data[gene]
w = {x[WDBQF.RSID]:x[WDBQF.WEIGHT] for x in w}
return w
def get_weights(self, gene):
w = self.weight_data[gene]
w = dataframe_from_weight_data(list(zip(*w)))
return w
def get_model_snps(self):
return set(self.snps_in_model)
def _get_gwas(self, snps):
snps = set(snps)
g = self.gwas_data
g = [g[x] for x in snps if x in g]
g = {x[0]:(x[1], x[2]) for x in g}
return g
def get_gwas(self, snps):
snps = set(snps)
g = self.gwas_data
g = [g[x] for x in snps if x in g]
if len(g):
g = list(zip(*g))
g = pandas.DataFrame({Constants.SNP:g[0], Constants.ZSCORE:g[1], Constants.BETA:g[2]})
else:
g = pandas.DataFrame(columns=[Constants.SNP, Constants.ZSCORE, Constants.BETA])
return g
def get_data_intersection(self):
return _data_intersection_3(self.weight_data, self.gwas_data, self.extra.gene.values, self.pedantic)
def provide_calculation(self, gene):
if gene != self.last_gene:
            # dummy while(True) to emulate a goto-style early exit
while True:
w = self._get_weights(gene)
gwas = self._get_gwas(list(w.keys()))
                type = [str, numpy.float64, numpy.float64, numpy.float64]  # numpy.str was only an alias for str and is removed in newer numpy
columns = [Constants.SNP, WDBQF.K_WEIGHT, Constants.ZSCORE, Constants.BETA]
d = {x: v for x, v in w.items() if x in gwas}
snps, cov = self.get_covariance(gene, list(d.keys()))
if snps is None:
d = pandas.DataFrame(columns=columns)
self.data_cache = len(w), d, cov, snps
self.last_gene = gene
break
d = [(x, w[x], gwas[x][0], gwas[x][1]) for x in snps]
d = list(zip(*d))
if len(d):
d = {columns[i]:numpy.array(d[i], dtype=type[i]) for i in range(0,len(columns))}
else:
d = {columns[i]:numpy.array([]) for i in range(0,len(columns))}
self.data_cache = len(w), d, cov, snps
self.last_gene = gene
break
return self.data_cache
def get_model_info(self):
return self.extra
def _data_intersection(model, gwas):
weights = model.weights
k = pandas.merge(weights, gwas, how='inner', left_on="rsid", right_on="snp")
genes = k.gene.drop_duplicates().values
snps = k.rsid.drop_duplicates().values
return genes, snps
def _data_intersection_2(weight_data, gwas_data):
genes = set()
snps = set()
for gene, entries in weight_data.items():
gs = list(zip(*entries))[WDBQF.RSID]
for s in gs:
if s in gwas_data:
genes.add(gene)
snps.add(s)
return genes, snps
def _data_intersection_3(weight_data, gwas_data, gene_list, pedantic):
genes = list()
_genes = set()
snps =set()
for gene in gene_list:
if not gene in weight_data:
if pedantic:
logging.warning("Issues processing gene %s, skipped", gene)
continue
gs = list(zip(*weight_data[gene]))[WDBQF.RSID]
for s in gs:
if s in gwas_data:
if not gene in _genes:
_genes.add(gene)
genes.append(gene)
snps.add(s)
return genes, snps
def _sanitized_gwas(gwas):
gwas = gwas[[Constants.SNP, Constants.ZSCORE, Constants.BETA]]
if numpy.any(~ numpy.isfinite(gwas[Constants.ZSCORE])):
logging.warning("Discarding non finite GWAS zscores")
gwas = gwas.loc[numpy.isfinite(gwas[Constants.ZSCORE])]
return gwas
def _prepare_gwas(gwas):
#If zscore is numeric, then everything is fine with us.
# if not, try to remove "NA" strings.
try:
i = gwas.zscore.apply(lambda x: x != "NA")
gwas = gwas.loc[i]
gwas = pandas.DataFrame(gwas)
gwas = gwas.assign(**{Constants.ZSCORE:gwas.zscore.astype(numpy.float64)})
except Exception as e:
logging.info("Unexpected issue preparing gwas... %s", str(e))
pass
if not Constants.BETA in gwas:
gwas = gwas.assign(**{Constants.BETA: numpy.nan})
return gwas
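# Toy illustration of what _prepare_gwas does with string "NA" zscores: drop those
# rows, then cast the remaining zscores to float64. Values are made up.
def _demo_prepare_gwas_na_handling():
    gwas = pandas.DataFrame({"snp": ["rs1", "rs2"], "zscore": ["1.5", "NA"]})
    keep = gwas.zscore.apply(lambda x: x != "NA")
    gwas = gwas.loc[keep]
    gwas = gwas.assign(zscore=gwas.zscore.astype(numpy.float64))
    return gwas  # one row left, with a numeric zscore column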
def _prepare_gwas_data(gwas):
data = {}
for x in gwas.values:
data[x[0]] = x
return data
def _prepare_model(model):
K = WDBQF.K_GENE
g = model.weights[K]
model.weights[K] = pandas.Categorical(g, g.drop_duplicates())
return model
def _prepare_weight_data(model, MAX_R=None):
d,_d = [],{}
snps = set()
for x in model.weights.values:
if MAX_R and len(d) + 1 > MAX_R:
logging.info("Restricting data load to first %d", MAX_R)
break
gene = x[WDBQF.GENE]
if not gene in d:
_d[gene] = []
d.append(gene)
entries = _d[gene]
entries.append(x)
snps.add(x[WDBQF.RSID])
return d, _d, snps
def _beta_loader(args):
beta_contents = Utilities.contentsWithPatternsFromFolder(args.beta_folder, [])
r = pandas.DataFrame()
for beta_name in beta_contents:
logging.info("Processing %s", beta_name)
beta_path = os.path.join(args.beta_folder, beta_name)
b = pandas.read_table(beta_path)
r = pandas.concat([r, b])
return r
def _gwas_wrapper(gwas):
logging.info("Processing loaded gwas")
return gwas
def build_context(args, gwas):
logging.info("Loading model from: %s", args.model_db_path)
model = load_model(args.model_db_path, args.model_db_snp_key)
if not args.single_snp_model:
if not args.stream_covariance:
logging.info("Loading covariance data from: %s", args.covariance)
covariance_manager = MatrixManager.load_matrix_manager(args.covariance)
else:
logging.info("Using streamed covariance from: %s", args.covariance)
logging.warning("This version is more lenient with input covariances, as many potential errors can't be checked for the whole input covariance in advance. Pay extra care to your covariances!")
streamer = DataFrameStreamer.data_frame_streamer(args.covariance, "GENE")
covariance_manager = MatrixManager.StreamedMatrixManager(streamer, MatrixManager.GENE_SNP_COVARIANCE_DEFINITION)
else:
logging.info("Bypassing covariance for single-snp-models")
d = model.weights[[WDBQF.K_GENE, WDBQF.K_RSID]].rename(columns={WDBQF.K_RSID:"id1"})
d = d.assign(id2=d.id1, value=1)
covariance_manager = MatrixManager.MatrixManager(d, {MatrixManager.K_MODEL:WDBQF.K_GENE, MatrixManager.K_ID1:"id1", MatrixManager.K_ID2:"id2", MatrixManager.K_VALUE:"value"})
gwas = _gwas_wrapper(gwas) if gwas is not None else _beta_loader(args)
context = _build_context(model, covariance_manager, gwas, args.MAX_R)
return context
def _build_context(model, covariance_manager, gwas, MAX_R=None):
gwas = _prepare_gwas(gwas)
gwas = _sanitized_gwas(gwas)
context = OptimizedContext(gwas, model, covariance_manager, MAX_R)
return context
def _build_simple_context(model, covariance_manager, gwas):
model = _prepare_model(model)
gwas = _prepare_gwas(gwas)
gwas = _sanitized_gwas(gwas)
context = SimpleContext(gwas, model, covariance_manager)
return context
def _to_int(d):
r = d
try:
r = int(d)
except:
pass
return r
def _results_column_order(with_additional=False):
K = Constants
AK = AssociationCalculation.ARF
column_order = [WDBQF.K_GENE,
WDBEQF.K_GENE_NAME,
K.ZSCORE,
AK.K_EFFECT_SIZE,
Constants.PVALUE,
AK.K_VAR_G,
WDBEQF.K_PRED_PERF_R2,
WDBEQF.K_PRED_PERF_PVAL,
WDBEQF.K_PRED_PERF_QVAL,
AK.K_N_SNPS_USED,
AK.K_N_SNPS_IN_COV,
WDBEQF.K_N_SNP_IN_MODEL]
if with_additional:
ADD = AssociationCalculation.ASF
column_order.extend([ADD.K_BEST_GWAS_P, ADD.K_LARGEST_WEIGHT])
return column_order
def format_output(results, context, remove_ens_version):
    results = results.drop("n_snps_in_model", axis=1)
# Dodge the use of cdf on non finite values
i = numpy.isfinite(results.zscore)
results[Constants.PVALUE] = numpy.nan
results.loc[i, Constants.PVALUE] = 2 * stats.norm.sf(numpy.abs(results.loc[i, Constants.ZSCORE].values))
model_info = pandas.DataFrame(context.get_model_info())
merged =
|
pandas.merge(results, model_info, how="inner", on="gene")
|
pandas.merge
|
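# Minimal illustration of the pandas.merge call that completes the row above: an
# inner join of per-gene association results with per-gene model metadata, shown
# here on toy frames with made-up values.
def _demo_pandas_merge():
    import pandas
    results = pandas.DataFrame({"gene": ["g1", "g2"], "zscore": [1.2, -0.4]})
    model_info = pandas.DataFrame({"gene": ["g1", "g2"], "gene_name": ["A", "B"]})
    return pandas.merge(results, model_info, how="inner", on="gene")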
import logging
import traceback
import pandas as pd
from os import sys, path
root_path = path.dirname(path.dirname(path.abspath(__file__)))
try:
import Database.NoSqlRw as NoSqlRw
import Database.AliasTable as AliasTable
import DataHub.DataUtility as DataUtility
from Utiltity.common import *
from Utiltity.df_utility import *
from Utiltity.time_utility import *
from Database.DatabaseEntry import DatabaseEntry
from Database.UpdateTableEx import UpdateTableEx
from Utiltity.plugin_manager import PluginManager
except Exception as e:
sys.path.append(root_path)
import Database.NoSqlRw as NoSqlRw
import Database.AliasTable as AliasTable
import DataHub.DataUtility as DataUtility
from Utiltity.common import *
from Utiltity.df_utility import *
from Utiltity.time_utility import *
from Database.DatabaseEntry import DatabaseEntry
from Database.UpdateTableEx import UpdateTableEx
from Utiltity.plugin_manager import PluginManager
finally:
logger = logging.getLogger('')
"""
We don't need to specify the fields explicitly for now; we just rely on the alias table.
If we want to standardize the fields later, we can simply rename them.
"""
NEED_COLLECTOR_CAPACITY = ['BalanceSheet', 'CashFlowStatement', 'IncomeStatement']
ROOT_TAGS = ['BalanceSheet', 'CashFlowStatement', 'IncomeStatement']
TABLE_BALANCE_SHEET = 'BalanceSheet'
TABLE_CASH_FLOW_STATEMENT = 'CashFlowStatement'
TABLE_INCOME_STATEMENT = 'IncomeStatement'
TABLE_LIST = [TABLE_BALANCE_SHEET, TABLE_CASH_FLOW_STATEMENT, TABLE_INCOME_STATEMENT]
IDENTITY_FINANCE_DATA = '<stock_code>.<exchange>'
QUERY_FIELD = {
'content': ([str], ROOT_TAGS),
'stock_identity': ([str], []),
# 'report_type': ([str], ['ANNUAL', 'SEMIANNUAL', 'QUARTERLY', 'MONTHLY', 'WEEKLY']),
'since': ([datetime.datetime, None], []),
'until': ([datetime.datetime, None], [])}
RESULT_FIELD = {
'identity': (['str'], []),
'period': (['datetime'], [])} # The last day of report period
class FinanceData(DataUtility.DataUtility):
def __init__(self, plugin: PluginManager, update: UpdateTableEx):
super().__init__(plugin, update)
self.__cached_data = {
'BalanceSheet': {},
'IncomeStatement': {},
'CashFlowStatement': {},
}
        # key: report_type; value: list of stock identities whose data needs saving
self.__save_table = {
'BalanceSheet': [],
'IncomeStatement': [],
'CashFlowStatement': [],
}
# ------------------------------------------------------------------------------------------------------------------
def execute_update_patch(self, patch: DataUtility.Patch) -> DataUtility.RESULT_CODE:
logger.info('FinanceData.execute_update_patch(' + str(patch) + ')')
if not self.is_data_support(patch.tags):
logger.info('FinanceData.execute_update_patch() - Data is not support.')
return DataUtility.RESULT_NOT_SUPPORTED
report_type = patch.tags[0]
save_list = self.__save_table.get(report_type)
report_dict = self.__cached_data.get(report_type)
if report_dict is None or save_list is None:
# Should not reach here
logger.error('Cannot not get report dict for ' + report_type)
return DataUtility.RESULT_FAILED
stock_identity = normalize_stock_identity(patch.tags[1])
df = self.__do_fetch_finance_data(report_type, stock_identity, patch.since, patch.until)
if df is None or len(df) == 0:
return DataUtility.RESULT_FAILED
# self.__alias_table.tell_names(list(df.columns))
# self.__alias_table.check_save()
# df.set_index('period')
codes = df['identity'].unique()
for code in codes:
new_df = df[df['identity'] == code]
if new_df is None or len(new_df) == 0:
continue
if code in report_dict.keys() and report_dict[code] is not None:
old_df = report_dict[code]
concated_df = concat_dataframe_row_by_index([old_df, new_df])
report_dict[code] = concated_df
else:
report_dict[code] = new_df
if code not in save_list:
save_list.append(code)
return DataUtility.RESULT_SUCCESSFUL
def trigger_save_data(self, patches: [DataUtility.Patch]) -> DataUtility.RESULT_CODE:
result = self.__save_cached_data()
if result:
if self.get_update_table().update_latest_update_time('SecuritiesInfo', '', ''):
return DataUtility.RESULT_SUCCESSFUL
else:
return DataUtility.RESULT_FAILED
return DataUtility.RESULT_FAILED
# -------------------------------------------------- probability --------------------------------------------------
def get_root_tags(self) -> [str]:
nop(self)
return NEED_COLLECTOR_CAPACITY
def is_data_support(self, tags: [str]) -> bool:
nop(self)
return (tags is not None) and (isinstance(tags, list)) and (len(tags) > 2) and (tags[0] in ROOT_TAGS)
def get_cached_data_range(self, tags: [str]) -> (datetime.datetime, datetime.datetime):
if not self.is_data_support(tags):
return None, None
df = self.__cached_data.get(tags[0])
if df is None or len(df) == 0:
return None, None
min_date = min(df['trade_date'])
max_date = max(df['trade_date'])
return text_auto_time(min_date), text_auto_time(max_date)
# --------------------------------------------------- private if ---------------------------------------------------
def data_from_cache(self, selector: DataUtility.Selector) -> pd.DataFrame or None:
result = None
if not self.is_data_support(selector.tags):
logger.error('FinanceData.data_from_cache() - Error selector tags : ' + str(selector.tags))
return None
report_type = selector.tags[0]
report_dict = self.__cached_data.get(report_type)
if report_dict is None:
logger.error('FinanceData.data_from_cache() - Do not support this kind of data : ' + report_type)
return None
stock_identity = selector.tags[1]
stock_identity = normalize_stock_identity(stock_identity)
stock_data = report_dict.get(stock_identity)
if stock_data is None:
self.__load_cached_data(selector.tags)
stock_data = report_dict.get(stock_identity)
if stock_data is None:
return None
df = slice_dataframe_by_datetime(stock_data, selector.since, selector.until)
return df
# -------------------------------------------------- probability --------------------------------------------------
def get_reference_data_range(self, tags: [str]) -> (datetime.datetime, datetime.datetime):
nop(self, tags)
return [None, None]
# -----------------------------------------------------------------------------------------------------------------
def __do_fetch_finance_data(self, report_type: str, stock_identity: str,
report_since: datetime.datetime, report_until: datetime.datetime) -> pd.DataFrame:
if report_type not in ROOT_TAGS:
return None
argv = {
'content': report_type,
'stock_identity': stock_identity,
'since': report_since,
'until': report_until,
}
if not self._check_dict_param(argv, QUERY_FIELD):
return None
plugins = self.get_plugin_manager().find_module_has_capacity(report_type)
for plugin in plugins:
df = self.get_plugin_manager().execute_module_function(plugin, 'fetch_data', argv)
if df is None or not isinstance(df, pd.DataFrame) or len(df) == 0 or \
not self._check_dataframe_field(df, RESULT_FIELD, list(RESULT_FIELD.keys())):
logger.info('Finance data - Fetch data format Error.')
continue
return df
return None
def __load_cached_data(self, tags: [str]) -> bool:
report_type = tags[0]
stock_identity = tags[1]
data_table = DatabaseEntry().get_finance_table(report_type)
record = data_table.query(stock_identity)
if record is not None and len(record) > 0:
df =
|
pd.DataFrame(record)
|
pandas.DataFrame
|
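# Minimal illustration of the pandas.DataFrame call that completes the row above:
# turning NoSQL-style query records (a list of dicts) into a frame. Field names
# and values are made up for the example.
def _demo_dataframe_from_records():
    import pandas as pd
    record = [{'period': '2019-12-31', 'total_assets': 100.0},
              {'period': '2020-12-31', 'total_assets': 120.0}]
    return pd.DataFrame(record)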
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
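    def _demo_alpha14_pandas_sketch(self):
        """
        Illustrative only: alpha14 above is simply the close minus its value five
        days earlier. Delay(x, 5) is assumed to behave like a 5-period shift, which
        matches how Delay is used throughout this class; toy series, plain pandas.
        """
        close = pd.Series([10.0, 10.5, 11.0, 10.8, 11.2, 11.5, 12.0])
        return close - close.shift(5)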
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
uncompleted
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
    def alpha30(self):
        """
        uncompleted: the computation stops here and the method returns None
        """
        close = self.close
        close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
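    def _demo_alpha53_pandas_sketch(self):
        """
        Illustrative only: alpha53 above is the share of up days (close above the
        previous close) over a 12-day window, in percent. This assumes that
        Count(0, a, b, n) counts the days in the last n where a > b, which is
        consistent with how the result is scaled here; toy series, plain pandas.
        """
        close = pd.Series([10, 10.5, 10.2, 10.8, 11.0, 10.9, 11.2,
                           11.1, 11.4, 11.3, 11.6, 11.8, 12.0])
        up = (close > close.shift(1)).astype(float)
        return up.rolling(12).sum() / 12.0 * 100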
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        # judge4 (not judge3 a second time), otherwise judge_2 below is always zero
        data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
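    def _demo_alpha63_sma_sketch(self):
        """
        Illustrative only: alpha63 above is an RSI-style ratio of smoothed gains to
        smoothed absolute price moves. This sketch assumes SMA(x, n, m) is the
        recursive smoothing Y_t = (m * X_t + (n - m) * Y_{t-1}) / n, i.e. an EMA
        with alpha = m / n, a common convention for this helper; toy series only.
        """
        close = pd.Series([10.0, 10.4, 10.1, 10.7, 10.6, 11.0, 10.8, 11.3])
        diff = close - close.shift(1)
        gain = diff.clip(lower=0)
        sma_gain = gain.ewm(alpha=1.0 / 6, adjust=False).mean()
        sma_abs = diff.abs().ewm(alpha=1.0 / 6, adjust=False).mean()
        return sma_gain / sma_abs * 100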
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
        close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
        alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
        temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
        alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
        alpha = pd.DataFrame(np.min(r, axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
        alpha = pd.DataFrame(df.stack())
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank(pd.DataFrame(data2['open'] - data2['open_min']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = -1
r['alpha'][r['r1'] > r['r2']] = 0
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha148']
return alpha
@timer
def alpha149(self):
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
close_index_delay = Delay(close_index,1)
data_index = pd.concat([close_index,close_index_delay], axis = 1, join = 'inner')
data_index.columns = ['close','close_delay']
data_index['delta'] = data_index['close']/data_index['close_delay'] - 1
data_index['judge'] = 1
data_index['judge'][data_index['close'] >= data_index['close_delay']] = 0
data_index['delta'][data_index['judge'] == 0] = np.nan
# index_delta_unstack = index_delta_unstack.dropna()
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['delta'] = data['close'] / data['close_delay'] - 1
df1 = pd.DataFrame(data['delta'])
df2 = pd.DataFrame(data_index['delta'])
alpha = RegBeta(1,df1,df2,252)
alpha.columns = ['alpha149']
return alpha
@timer
def alpha150(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
alpha = (data['Close'] + data['High'] + data['Low'])/3 * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha150']
return alpha
@timer
def alpha151(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close'] - data['close_delay'])
alpha = SMA(temp,20,1)
alpha.columns = ['alpha151']
return alpha
@timer
def alpha152(self):
close = self.close
close_delay = Delay(close,9)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
sma1 = SMA(temp_delay,9,1)
sma1_delay = Delay(sma1,1)
sma1_delay_mean1 = Mean(sma1_delay,12)
sma1_delay_mean2 = Mean(sma1_delay,26)
data_temp = pd.concat([sma1_delay_mean1,sma1_delay_mean2],axis = 1, join = 'inner')
data_temp.columns = ['m1','m2']
alpha = SMA(pd.DataFrame(data_temp['m1'] - data_temp['m2']),9,1)
alpha.columns = ['alpha152']
return alpha
@timer
def alpha153(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close_mean3, close_mean6, close_mean12, close_mean24], axis = 1 ,join ='inner')
alpha = pd.DataFrame(np.mean(data, axis = 1))
alpha.columns = ['alpha153']
return alpha
@timer
def alpha154(self):
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,180)
data = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr = Corr(data,18)
vwap_min = TsMin(vwap,16)
data1 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data1.columns = ['vwap','vwap_min']
temp = pd.DataFrame(data1['vwap'] - data1['vwap_min'])
data_temp = pd.concat([corr,temp], axis = 1, join = 'inner')
data_temp.columns = ['corr','temp']
data_temp['alpha'] = 1
data_temp['alpha'][data_temp['corr'] >= data_temp['temp']] = 0
alpha = pd.DataFrame(data_temp['alpha'])
alpha.columns = ['alpha154']
return alpha
@timer
def alpha155(self):
volume = self.volume
sma1 = SMA(volume,13,2)
sma2 = SMA(volume,26,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3], axis = 1 ,join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(data['temp'] - data['sma'])
alpha.columns = ['alpha155']
return alpha
@timer
def alpha156(self):
vwap = self.vwap
Open = self.open
low = self.low
vwap_delta = Delta(vwap,5)
vwap_delta_decay = DecayLinear(vwap_delta,3)
r1 = Rank(vwap_delta_decay)
data1 = pd.concat([Open,low],axis = 1, join = 'inner')
temp = -1 * Delta(pd.DataFrame(data1['Open'] * 0.15 + data1['Low'] * 0.85),2)
temp_decay = DecayLinear(temp,3)
r2 = Rank(temp_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(- 1 *np.max(r, axis = 1))
alpha.columns = ['alpha156']
return alpha
@timer
def alpha157(self):
close = self.close
ret = self.ret
close_delta = Delta(close,5)
close_delta_r = Rank(Rank(close_delta) * -1)
r1 = TsMin(close_delta_r,2)
ret_delay = Delay(-1 * ret,6)
r2 = TsRank(ret_delay,5)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
temp = pd.DataFrame(r['r1'] + r['r2'])
alpha = TsMin(temp,5)
alpha.columns = ['alpha157']
return alpha
@timer
def alpha158(self):
high = self.high
low = self.low
close = self.close
temp = SMA(close,15,2)
temp.columns = ['temp']
data = pd.concat([high,low,close,temp],axis = 1 , join = 'inner')
alpha =(data['High'] + data['Low'] - 2 * data['temp'] )/data['Close']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha158']
return alpha
@timer
def alpha159(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
data1 = pd.concat([low,close_delay],axis = 1, join = 'inner')
data2 = pd.concat([high, close_delay], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.min(data1,axis = 1))
temp2= pd.DataFrame(np.max(data2,axis = 1))
temp = pd.concat([temp1,temp2], axis = 1 ,join = 'inner')
temp.columns = ['temp1','temp2']
temp1_sum6 = Sum(temp1,6)
temp1_sum12 = Sum(temp1,12)
temp1_sum24 = Sum(temp1,24)
tep = pd.DataFrame(temp['temp2'] - temp['temp1'])
s6 = Sum(tep,6)
s12 = Sum(tep,12)
s24 = Sum(tep,24)
data3 = pd.concat([temp1_sum6,temp1_sum12,temp1_sum24,s6,s12,s24], axis = 1 ,join = 'inner')
data3.columns = ['ts6','ts12','ts24','s6','s12','s24']
temp3 = pd.DataFrame(data3['ts6']/data3['s6'] * 12 * 24 + data3['ts12']/data3['s12'] * 6 * 24 \
+ data3['ts24']/data3['s24'] * 6 * 24)
alpha = temp3 / (6*12 + 6*24 + 12*24) * 100
alpha.columns = ['alpha159']
return alpha
@timer
def alpha160(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_std','close_delay']
data['close_std'][data['close'] >= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),20,1)
alpha.columns = ['alpha160']
return alpha
@timer
def alpha161(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data1 = pd.concat([high,low],axis = 1 , join = 'inner')
diff = pd.DataFrame(data1['High'] - data1['Low'])
data2 = pd.concat([close_delay,high], axis = 1, join ='inner')
abs1 = pd.DataFrame(np.abs(data2['close_delay'] - data2['High']))
data3 = pd.concat([diff,abs1], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data3,axis = 1))
data4 = pd.concat([close_delay,low],axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.abs(data4['close_delay'] -data4['Low']))
data = pd.concat([temp1,temp2],axis =1 , join = 'inner')
data.columns = ['temp1','temp2']
temp = pd.DataFrame(np.max(data, axis = 1))
alpha = Mean(temp,12)
alpha.columns = ['alpha161']
return alpha
@timer
def alpha162(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['max']= data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
temp1 = SMA(pd.DataFrame(data['max']),12,1)
temp2 = SMA(pd.DataFrame(data['abs']),12,1)
data1 = pd.concat([temp1,temp2], axis = 1, join = 'inner')
data1.columns = ['temp1','temp2']
tep = pd.DataFrame(data1['temp1']/data1['temp2'])
temp3 = TsMin(tep,12)
temp4 = TsMax(tep,12)
data_temp = pd.concat([tep,temp3,temp4], axis = 1, join = 'inner')
data_temp.columns = ['tep','temp3','temp4']
        alpha = (data_temp['tep'] - data_temp['temp3'])/(data_temp['temp4'] - data_temp['temp3']) * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha162']
return alpha
@timer
def alpha163(self):
low = self.low
high = self.high
volume = self.volume
ret = self.ret
vwap = self.vwap
volume_mean = Mean(volume,20)
data = pd.concat([high,low,vwap,ret,volume_mean],axis = 1, join = 'inner')
data.columns = ['high','low','vwap','ret','volume_mean']
temp = pd.DataFrame(-1 *data['ret'] * data['volume_mean'] *data['vwap'] * \
(data['high'] - data['low']))
alpha = Rank(temp)
alpha.columns = ['alpha163']
return alpha
@timer
def alpha164(self):
close = self.close
high = self.high
low = self.low
close_delay = Delay(close,1)
data = pd.concat([close,high,low,close_delay],axis = 1, join = 'inner')
data.columns = ['close','high','low','close_delay']
data['temp'] = 1/(data['close'] - data['close_delay'])
data_min = TsMin(pd.DataFrame(data['temp']),12)
data_min.columns = ['min']
data2 = pd.concat([data,data_min],axis = 1, join = 'inner')
        data2['tep'] = (data2['temp'] - data2['min'])/(data2['high'] - data2['low'])
data2['tep'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data2['tep']) * 100,13,2)
alpha.columns = ['alpha164']
return alpha
@timer
def alpha165(self):
close = self.close
close_mean = Mean(close,48)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame(data['close'] - data['close_mean'])
temp_sum = Sum(temp,48)
temp_sum_min = TsMin(temp_sum,48)
temp_sum_max = TsMax(temp_sum,48)
close_std = STD(close,48)
data_temp = pd.concat([temp_sum_min,temp_sum_max,close_std], axis = 1, join = 'inner')
data_temp.columns = ['min','max','std']
alpha = (data_temp['max'] - data_temp['min'])/data_temp['std']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha165']
return alpha
@timer
def alpha166(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_mean = Mean(temp,20)
data1 = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data1.columns = ['temp','temp_mean']
temp2 = Sum(pd.DataFrame(data1['temp'] - data1['temp_mean']),20) * 20 * 19
temp3 = Sum(temp,20) * 19 * 18
data2 = pd.concat([temp2,temp3], axis = 1, join = 'inner')
data2.columns = ['temp2','temp3']
alpha = np.power(data2['temp2'],1.5)/np.power(data2['temp3'],1.5)
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha166']
return alpha
@timer
def alpha167(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = data['close'] - data['close_delay']
data['temp'][data['close'] <= data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha167']
return alpha
@timer
def alpha168(self):
volume = self.volume
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean']
        alpha = pd.DataFrame(data['volume']/data['volume_mean'] * -1)
alpha.columns = ['alpha168']
return alpha
@timer
def alpha169(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp1 = pd.DataFrame(data['close'] - data['close_delay'])
sma = SMA(temp1,9,1)
temp2 = Delay(sma,1)
temp2_mean12 = Mean(temp2,12)
temp2_mean26 = Mean(temp2,26)
data2 = pd.concat([temp2_mean12,temp2_mean26], axis = 1, join ='inner')
data2.columns = ['mean1','mean2']
        # SMA window assumed to be (10,1), following the published alpha#169 formula
        alpha = SMA(pd.DataFrame(data2['mean1'] - data2['mean2']),10,1)
        alpha.columns = ['alpha169']
        return alpha
# %%
import os
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import pandas as pd
import seaborn as sns
from IPython.core.interactiveshell import InteractiveShell
# Setting styles
InteractiveShell.ast_node_interactivity = "all"
sns.set(style="whitegrid", color_codes=True, rc={"figure.figsize": (12.7, 9.27)})
# %%
df = pd.read_csv(
os.path.join("data", "processed", "data_sentiment.csv"), low_memory=False
)
# %%
df.sentiment.describe()
sns.distplot(df.sentiment)
plt.title("Afinn sentiment distribution")
plt.show()
# %%
df.sentiment_norm.describe()
sns.distplot(df.sentiment_norm)
plt.title("Afinn normalized sentiment distribution")
plt.show()
# %%
df.sentiment_abs.describe()
sns.distplot(df.sentiment_abs)
plt.title("Afinn absolute values sentiment distribution")
plt.show()
# %%
df.sentiment_abs_norm.describe()
sns.distplot(df.sentiment_abs_norm)
plt.title("Afinn absolute values normalized sentiment distribution")
plt.show()
# %%
ax = sns.scatterplot(df.sentiment, df.sentiment_norm, alpha=0.005)
plt.title("Relationship between Afinn and Afinn normalized")
plt.show()
# %% [markdown]
# # What do the most positive and negative messages look like
#
# * For pure afinn sentiment values it is clear that really long messages get the
# top spots
# * With normalized (divided by the number of words) sentiment values, short and clear
# messages get the top spots. This seems much better
# * The normalized sentiment values have no outliers and the values seem more
# realistic and intuitive. We only use the normalized values for the rest of the
# analysis; a minimal sketch of the normalization is shown in the next cell
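# %% [markdown]
# The cell below is only a minimal sketch of the idea behind `sentiment_norm`:
# the raw Afinn score of a message divided by its word count. It is not the exact
# preprocessing pipeline that produced `data_sentiment.csv`; the example message
# and the word-count normalization are assumptions for illustration.
# %%
from afinn import Afinn

afinn = Afinn()
example = "I love this, it is a really good and happy message"
raw_score = afinn.score(example)  # sum of the Afinn valences of the words
norm_score = raw_score / max(len(example.split()), 1)  # divide by the word count
print(raw_score, norm_score)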
# %%
print("AFINN")
print("")
print("POSITIVE:")
for i, text in enumerate(
df.sort_values(by="sentiment", ascending=False)["message"][:5]
):
print(i + 1, "most positive")
print(text[:200])
print("")
print("")
print("NEGATIVE:")
for i, text in enumerate(df.sort_values(by="sentiment", ascending=True)["message"][:5]):
print(i + 1, "most negative")
print(text[:200])
print("")
print("AFINN normalized")
print("")
print("POSITIVE:")
for i, text in enumerate(
df.sort_values(by="sentiment_norm", ascending=False)["message"][:5]
):
print(i + 1, "most positive")
print(text[:200])
print("")
print("")
print("NEGATIVE:")
for i, text in enumerate(
df.sort_values(by="sentiment_norm", ascending=True)["message"][:5]
):
print(i + 1, "most negative")
print(text[:200])
print("")
# %%
print("Most emotional:")
for i, text in enumerate(
df.sort_values(by="sentiment_abs_norm", ascending=False)["message"][:5]
):
print(i + 1, "most emotional")
print(text[:200])
print("")
print("")
print("Least emotional:")
for i, text in enumerate(
df.sort_values(by="sentiment_abs_norm", ascending=True)["message"][:5]
):
print(i + 1, "least emotional")
print(text[:200])
print("")
# %%
afinn_positive_wc = WordCloud(background_color="white").generate(
" ".join(df.sort_values(by="sentiment_norm", ascending=False)["message"][:500])
)
plt.imshow(afinn_positive_wc)
plt.title("Wordcloud of 500 most positive messages")
plt.show()
afinn_negative_wc = WordCloud(background_color="white").generate(
" ".join(df.sort_values(by="sentiment_norm", ascending=True)["message"][:500])
)
plt.imshow(afinn_negative_wc)
plt.title("Wordcloud of 500 most negative messages")
plt.show()
afinn_emotional_wc = WordCloud(background_color="white").generate(
    " ".join(df.sort_values(by="sentiment_abs_norm", ascending=False)["message"][:500])
)
plt.imshow(afinn_emotional_wc)
plt.title("Wordcloud of 500 most emotional messages")
plt.show()
# %%
df["created_at"] = pd.to_datetime(df.created_at)
df["updated_at"] = pd.to_datetime(df.updated_at)
df["created_at_date"] = df.created_at.dt.date
df["updated_at_date"] = df.updated_at.dt.date
df["created_at_month"] = df["created_at"] - pd.offsets.MonthBegin(1, normalize=True)
import numpy as np
import openpyxl
import pandas as pd
from ortoolpy import addbinvars, addvars
from pulp import LpProblem, lpSum, value, lpDot
from make_data import FILE_NAME, GROUP_COUNT, USER_COUNT
RESULT_FILE = 'result_optimization.xlsx'
def divide_users():
    # By default this is read as 54 x 54, so skip the rows that are not needed
skip_rows = [i + GROUP_COUNT for i in range(1, USER_COUNT - GROUP_COUNT + 1)]
df = pd.read_excel(FILE_NAME, header=0, index_col=0, skiprows=skip_rows)
    # Number of duty slots
event_count = df.shape[0]
# print(f'{type(box_size)}: {box_size}')
# => <class 'int'>: 18
    # Number of users
user_count = df.shape[1]
# print(f'{type(user_size)}: {user_size}')
# => <class 'int'>: 54
    # Build the optimization model
model = LpProblem()
    # Prepare the decision variables (on duty / off duty is binary, so a list of 0-1 variables)
# https://docs.pyq.jp/python/math_opt/pdopt.html
var_schedule = np.array(addbinvars(event_count, user_count))
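    # var_schedule[event, user] == 1 means that user is assigned to that duty slot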
    df['ๅฟ
่ฆไบบๆฐๅทฎ'] = addvars(event_count)  # 'ๅฟ
่ฆไบบๆฐๅทฎ' = gap from the required headcount
    # Weight
weight = 1
    # Set the objective function
    model += lpSum(df.ๅฟ
่ฆไบบๆฐๅทฎ) * weight
    # Constraints
    # Three people per duty slot
for idx, row in df.iterrows():
        model += row.ๅฟ
่ฆไบบๆฐๅทฎ >= (lpSum(var_schedule[row.name]) - 3)
        model += row.ๅฟ
่ฆไบบๆฐๅทฎ >= -(lpSum(var_schedule[row.name]) - 3)
    # Each person takes at most one duty slot
for user in range(user_count):
scheduled = [var_schedule[event, user] for event in range(event_count)]
model += lpSum(pd.Series(scheduled)) <= 1
    # Only assign duty slots the person is available for
df_rev = df[df.columns].apply(lambda r: 1 - r[df.columns], 1)
    for (_, d), (_, s) in zip(df_rev.iterrows(), pd.DataFrame(var_schedule).iterrows()):
        # assumed constraint: never assign a slot the person is unavailable for
        # (d is the inverted availability row, s the matching schedule row; lpDot
        # from the imports computes their inner product)
        model += lpDot(d, s) <= 0
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import matplotlib as mpl
#define function to fit to data
from scipy.optimize import curve_fit
from scipy.stats import t
# Read the file
xls = pd.ExcelFile('Calibration_data_all.xlsx')
sn = xls.sheet_names
names = ['SU1', 'SU2', 'SU3', 'SU5']
d = {}
for name in sn:
d[name] = pd.read_excel(xls,
sheet_name = name,
header = 3, #use the first row (index 1) as column headings
#parse_dates={'Datetime'}, #convert text to dates/times where possible
#index_col = 9 #read dates from column 9 into the index
)
#set up the figures and subfigures (aka axes)
fig, axs = plt.subplots(2,2, sharex = False)
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
location = ([0,0], [0,1], [1,0], [1,1])
#l = {}
#for i in range(4):
# x = d[sn[i]]['1/R (uS)']
# y = d[sn[i]]['Kt']
# l[sn[i]] = axs[location[i]].plot(x,y,
# color = 'b',
# marker = 'o',
# linestyle = 'none',
# label = sn[i],
# )
def zero_int(x, m):
return m*x
def linear(x,m,b):
return m*x+b
def power(x,a,b):
return a*x**b
def power_with_offset(x,a,b,c):
return a*(x+c)**b
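# Candidate calibration models: proportional (zero intercept), linear, power law,
# and power law with an x-offset.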
raw_data_2021 = {'SU1':d['SU1 3-17-21'], 'SU2':d['SU2 3-17-21'], 'SU3':d['SU3 3-17-21'], 'SU5':d['SU5 3-17-21']}
raw_data_2020 = {'SU1':d['SU1'], 'SU2':d['SU2'], 'SU3':d['SU3'], 'SU4':d['SU4']}
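# Both the sensor conductance ('1/R (uS)') and the EXO conductivity ('Kt') are
# divided by 1000 below, so the fits and axis labels are in mS and mS/cm.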
for i,ax in enumerate(fig.axes):
if names[i] in raw_data_2020:
y_2020 = raw_data_2020[names[i]]['1/R (uS)']/1000
x_2020 = raw_data_2020[names[i]]['Kt']/1000
y_2021 = raw_data_2021[names[i]]['1/R (uS)']/1000
x_2021 = raw_data_2021[names[i]]['Kt']/1000
log_x = np.log(x_2021)
log_y = np.log(y_2021)
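    # The power-law fit y = a*x**b is done as a straight line in log-log space:
    # log(y) = log(a) + b*log(x), so the slope gives b and exp(intercept) gives a.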
ax.tick_params(axis='both', which = 'major', labelsize =8)
ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
#m,b = np.polyfit(x,y,1)
print(names[i])
popt0, pcov0 = curve_fit(zero_int, x_2021, y_2021)
popt1, pcov1 = curve_fit(linear, x_2021, y_2021)
popt2, pcov2 = curve_fit(linear, log_x, log_y)
if i == 0:
popt3, pcov3 = curve_fit(power_with_offset, x_2021, y_2021)
    m0 = popt0[0]
(m1,b1) = popt1
(m_log,b_log) = popt2
a = np.exp(b_log)
b = m_log
(a2,b2,c2)=popt3
if i < 3:
x = np.linspace(20/1000,50000/1000,1000)
else:
x = np.linspace(100/1000,50000/1000,1000)
linear_y = m1*x+b1
power_y = a*x**b
zero_y = m0*x
offset_y = a2*(x+c2)**b2
#linear_y = [b1,m1*1.1*max(x_2021)+b1]
#linear_x = [0,1.1*max(x_2021)]
residuals = y_2021- linear(x_2021, *popt1)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_2021-np.mean(y_2021))**2)
r_squared = 1 - (ss_res / ss_tot)
r_squared_l = r_squared
residuals = log_y- (m_log*log_x+b_log)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((np.log(y_2021)-np.mean(np.log(y_2021)))**2)
r_squared = 1 - (ss_res / ss_tot)
r_squared_p = r_squared
residuals = y_2021- zero_int(x_2021, *popt0)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_2021-np.mean(y_2021))**2)
r_squared = 1 - (ss_res / ss_tot)
r_squared_z = r_squared
if i == 0:
residuals = y_2021- power_with_offset(x_2021, *popt3)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_2021-np.mean(y_2021))**2)
r_squared = 1 - (ss_res / ss_tot)
r_squared_po = r_squared
if i == 0:
if b1<0:
sign = "-"
else:
sign = "+"
fit2 = ax.plot(x, offset_y,
color = 'r',
linestyle = '-.',
label = ('y=%4.2f(x+%4.1f)$^{%4.2f}$, r$^2_{loglog}$=%4.3f' %(a2,c2,b2,r_squared_po)),
linewidth = 0.5,
)
fit1 = ax.plot(x, linear_y,
color = 'b',
linestyle = '--',
label = ('y=%4.2fx%s%5.1f, r$^2$=%4.3f' % (m1,sign,abs(b1),r_squared_l)),
linewidth = 0.5,
)
#ax.set_ylim(9,250)
#ax.set_xlim(16/1000,110)
if i != 0:
fit3 = ax.plot(x, power_y,
color = 'r',
linestyle = '-.',
label = ('y=%4.2fx$^{%4.2f}$, r$^2_{loglog}$=%4.3f' % (a,b,r_squared_p)),
linewidth = 0.5,
)
ax.set_xlim(16/1000,110)
fit2 = ax.plot(x, zero_y,
color = 'b',
linestyle = '--',
label = ('y=%4.2fx, r$^2$=%4.3f' %(m0,r_squared_z)),
linewidth = 0.5,
)
ax.set_yscale('log')
ax.set_xscale('log')
if names[i] in raw_data_2020:
l_2020= ax.plot(x_2020,y_2020,
color = 'black',
markeredgewidth=0.5,
marker = 'x',
linestyle = 'none',
markersize = '3',
label = '2020 data',
)
l_2021= ax.plot(x_2021,y_2021,
color = 'black',
markeredgewidth=0.5,
marker = 'o',
linestyle = 'none',
markersize = '3',
fillstyle='none',
label = '2021 data',
)
#show the legend
ax.legend(frameon=False, loc='upper left', prop={'size':7})#, ncol=3)
ax.set_title(names[i], fontsize=9)
#handles, labels = ax[0].get_legend_handles_labels()
#order = [3,2,0,1]
#ax[0].legend([handles[idx] for idx in order], [labels[idx]for idx in order], frameon=False, loc='upper left', prop={'size':7})#, ncol=3)
print('got here')
#Set up the y axis labels
axs[0,0].set_ylabel('Sensor 1/R (mS)', fontsize = 9)
axs[1,0].set_ylabel('Sensor 1/R (mS)', fontsize = 9)
axs[1,0].set_xlabel('EXO EC (mS/cm)', fontsize = 9)
axs[1,1].set_xlabel('EXO EC (mS/cm)',fontsize = 9)
print('got here')
fig.tight_layout()
print('got here')
#FIGURE 2
#compute confidence limits of inverse of regression
def confidence_limits(x,y,b0,b1,m,xhat0,alpha):
n = x.size
yhat = b0 + b1*x
xbar = x.mean()
s2yx = ((y-yhat)**2).sum()/(n-2)
#syx = s2yx**0.5
s2xhat0 = (s2yx/b1**2)*(1/m+1/n+((xhat0-xbar)**2)/((x-xbar)**2).sum())
sxhat0 = s2xhat0**0.5
t_stat = t.ppf(1-alpha/2,df=n-2)
yplus = b0 + b1*xhat0 + t_stat*sxhat0
yminus = b0 + b1*xhat0 - t_stat*sxhat0
xplus = (yplus - b0)/b1
xminus = (yminus - b0)/b1
return(xminus,xhat0,xplus)
def error_analysis(x_obs,y_obs,m,b):
n = x_obs.size
y_est = m * x_obs + b
residuals = y_obs - y_est
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_obs-np.mean(y_obs))**2)
r_squared = 1 - (ss_res / ss_tot)
ser = np.sqrt((1/(n-2))*(np.sum(residuals**2)))
relative_error = residuals/y_obs
rmse = np.sqrt(np.sum(residuals**2)/n)
rmsre = np.sqrt(np.sum(relative_error**2)/n)
return(r_squared,ser,rmse,rmsre)
def find_and_plot_error_bands(x_obs, y_obs, use_offset, ax):
#Fit power function and adjust input by subracting constant and taking logs
if use_offset == True:
popt_test,pcov_test = curve_fit(power_with_offset,x_obs,y_obs)
(a,b,c)=popt_test
x_adjusted = np.log(x_obs+c)
y_adjusted = np.log(y_obs)
else:
c = 0
x_adjusted = np.log(x_obs)
y_adjusted = np.log(y_obs)
#Fit linear function to adjusted input
popt1,pcov1 = curve_fit(linear,x_adjusted,y_adjusted)
(b1,b0) = popt1
(r2_log,ser_log,rmse_log,rmsre_log)=error_analysis(x_adjusted,y_adjusted,b1,b0)
if use_offset == False:
a = np.exp(b0)
b = b1
#Set up arrays to plot
x_to_plot = np.linspace(0.95*min(x_obs),1.05*max(x_obs),1000)
log_x_adj_to_plot = np.log(x_to_plot+c)
log_y_to_plot = b0+b1*log_x_adj_to_plot
y_to_plot = np.exp(log_y_to_plot)
limits = []
for i in range(len(log_x_adj_to_plot)):
limits.append(confidence_limits(x_adjusted,y_adjusted,b0,b1,1,log_x_adj_to_plot[i],.05))
df_limits =
|
pd.DataFrame(limits)
|
pandas.DataFrame
|
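# A self-contained sketch of the inverse-regression (calibration) interval that
# confidence_limits in the snippet above is built around: fit y = b0 + b1*x,
# invert a new reading y0 to xhat0 = (y0 - b0)/b1, and attach a t-based
# interval (m = 1 new observation). The synthetic data, y0 and alpha are
# illustrative assumptions.
import numpy as np
from scipy.stats import t

rng = np.random.default_rng(0)
x = np.linspace(1.0, 10.0, 20)
y = 2.0 * x + 0.5 + rng.normal(scale=0.3, size=x.size)
b1, b0 = np.polyfit(x, y, 1)  # slope, intercept
n = x.size
s2yx = np.sum((y - (b0 + b1 * x)) ** 2) / (n - 2)  # residual variance about the fit
y0 = 12.0  # a new observed response to be inverted
xhat0 = (y0 - b0) / b1
s_xhat0 = np.sqrt((s2yx / b1**2) * (1.0 + 1.0 / n
                                    + (xhat0 - x.mean()) ** 2 / np.sum((x - x.mean()) ** 2)))
half_width = t.ppf(1 - 0.05 / 2, df=n - 2) * s_xhat0
print(xhat0 - half_width, xhat0, xhat0 + half_width)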
import os
import pickle
from glob import glob
import numpy as np
import pandas as pd
from keras.applications import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image as kimage
np.set_printoptions(threshold=np.nan)
pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
import concurrent.futures
import datetime as dt
import sys
from pathlib import Path
import pandas as pd
import yfinance as yf
class FinanceAnalysis:
def analyze(self):
# Load data from file, generate data by running the `ticker_counts.py` script
data_directory = Path('./data')
input_path = data_directory / f'{dt.date.today()}_tick_df.csv'
df_tick = pd.read_csv(input_path).sort_values(by=['Mentions', 'Ticker'], ascending=False)
columns = ['Ticker', 'Name', 'Industry', 'PreviousClose', 'Low5d', 'High5d', 'ChangePercent1d', 'ChangePercent5d',
'ChangePercent1mo']
# Activate all tickers' info in parallel
self.tickers = yf.Tickers(df_tick['Ticker'].tolist())
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(lambda t: t.info, self.tickers.tickers)
self.data = self.tickers.download(period='1mo', group_by='ticker', progress=True)
fin_data = [self.get_ticker_info(tick) for tick in df_tick['Ticker']]
df_best =
|
pd.DataFrame(fin_data, columns=columns)
|
pandas.DataFrame
|
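# A small, generic illustration of the warm-up trick used above, where
# executor.map touches each ticker's lazily loaded .info so the slow network
# fetches run concurrently; the Lazy class below is a stand-in assumption,
# yfinance itself is not needed for the sketch.
import concurrent.futures
import time

class Lazy:
    def load(self):
        time.sleep(0.01)  # pretend this is a slow network call
        return {"ok": True}

items = [Lazy() for _ in range(8)]
with concurrent.futures.ThreadPoolExecutor() as executor:
    list(executor.map(lambda it: it.load(), items))  # all loads overlap in threads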
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is to find out why my R peaks at a value around 2021-07-01, that is
much higher than RIVM's.
Created on Fri Jul 23 12:52:53 2021
@author: hk_nien
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tools
import nlcovidstats as nlcs
def get_Rt_rivm(mindate, maxdate):
"""Return Series with R(rivm). Note timestamps are always at time 12:00:00."""
df_rivm = nlcs.DFS['Rt_rivm'].copy()
# get 4 days extra from 'prognosis'
prog = df_rivm.loc[df_rivm['R'].isna()].iloc[:4]
prog_R = np.around(np.sqrt(prog['Rmin']*prog['Rmax']), 2)
df_rivm.loc[prog_R.index, 'R'] = prog_R
R_rivm = df_rivm.loc[~df_rivm['R'].isna(), 'R']
return R_rivm.loc[(R_rivm.index >= mindate) & (R_rivm.index <= maxdate)]
def get_Rt_mine(mindate, maxdate, slide_delay=True, cdf=None):
"""Return my Rt estimate, sampled at 12:00 daily.
Optionally provide cdf as test case; DataFrame with time index and
'Delta7r' column (7-day rolling average daily positive cases).
"""
from scipy.interpolate import interp1d
delay = nlcs.DELAY_INF2REP if slide_delay else 4.0
if cdf is None:
cdf, _npop = nlcs.get_region_data('Nederland', lastday=-1, correct_anomalies=True)
Rdf = nlcs.estimate_Rt_df(cdf['Delta7r'].iloc[10:], delay=delay, Tc=4.0)
r_interp = interp1d(
Rdf.index.astype(np.int64), Rdf['Rt'], bounds_error=False,
fill_value=(Rdf['Rt'].iloc[0], Rdf['Rt'].iloc[-1])
)
tlims = [pd.to_datetime(t).strftime('%Y-%m-%dT12:00')
for t in [mindate, maxdate]
]
index = pd.date_range(*tlims, freq='1d')
R_mine = pd.Series(r_interp(index.astype(int)), index=index)
return R_mine
def get_Rt_test_case(mindate, maxdate, case='step', slide_delay=True):
index = pd.date_range('2021-01-01', 'now', freq='1d')
cdf = pd.DataFrame(index=index + pd.Timedelta(4, 'd'))
cdf['Delta7r'] = 1000
if case == 'step':
cdf.loc[index >= '2021-07-01', 'Delta7r'] = 10000
# sudden factor 10 increase should result in
# 1 day R=1e+4 or 2 days R=1e+2, which is the case.
else:
raise ValueError(f'case={case!r}')
return get_Rt_mine(mindate, maxdate, slide_delay=slide_delay, cdf=cdf)
#%%
if __name__ == '__main__':
nlcs.reset_plots()
nlcs.init_data(autoupdate=True)
#%%
Rt_mine_fixD = get_Rt_mine('2021-06-22', '2021-07-20', slide_delay=False)
Rt_mine_varD = get_Rt_mine('2021-06-22', '2021-07-20', slide_delay=True)
Rt_rivm = get_Rt_rivm('2021-06-22', '2021-07-13')
# Rt_mine = get_Rt_test_case('2021-06-22', '2021-07-09', 'step', slide_delay=False)
cases_df, n_pop = nlcs.get_region_data('Nederland')
cases = cases_df['Delta'] * n_pop
cases7 = cases_df['Delta7r'] * n_pop
cases_mask = (cases.index >= '2021-06-22') & (cases.index <= '2021-07-23')
day =
|
pd.Timedelta(1, 'd')
|
pandas.Timedelta
|
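# Quick arithmetic behind the expectation stated in get_Rt_test_case above:
# assuming the usual exponential-growth relation R ~= ratio**(Tc/dt) with the
# generation interval Tc = 4 days (the value passed to estimate_Rt_df), a
# sudden factor-10 jump in cases gives:
Tc = 4.0
for dt, ratio in [(1, 10), (2, 10)]:
    print(dt, ratio ** (Tc / dt))  # 1 day -> 1e4, spread over 2 days -> 1e2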
# Author: <NAME>
# Github: Data-is-Life
# Date: 10/01/2018
import re
import pandas as pd
def rename_columns(strs_to_replace):
'''Keep DataFrame heading formatting consistent by converting all values
to a standardized format that is easy to trace back. If left unformatted,
there could be duplicate columns with the same values, which would make it
far more challenging to search for homes.'''
modified_list = []
for num in strs_to_replace:
modified_list.append(num.replace('Redfin Estimate', 'redfin_est'
).replace(
'Beds', 'num_bdrs').replace('beds', 'num_bts').replace(
'Baths', 'num_bts').replace('$', 'price').replace(
'Built: ', 'yr_blt').lower().replace('__', '_').replace(
' ', '_').replace(':_', '').replace(':', '').replace(
'.', '').replace('sqft', 'sq_ft').replace('_(', '_').replace(
'(', '_').replace(')', '').replace(',', '').replace(
'minimum', 'min').replace('maximum', 'max').replace(
'bedrooms', 'beds').replace('bathrooms', 'baths').replace(
'#_of_', 'num_').replace('sq. ft.', 'sqft'))
return modified_list
def top_info_parser(soup):
'''Starting with getting the information at the very top of the page.
This takes information from the top of the page that highlights the main
attributes of the home, including latitude and longitude.'''
all_top = soup.findAll('div', {'class': 'HomeInfo inline-block'})
top_info_dict = {}
values_ = []
cats_ = []
sqft = []
lat_lon = []
for num in all_top:
# Getting the address
address_ = num.findAll('span', {'class': 'street-address'})
top_info_dict['address'] = [num.text for num in address_][0]
# Getting the city
city_ = num.findAll('span', {'class': 'locality'})
top_info_dict['city'] = [num.text for num in city_][0]
# Getting the state (maybe not needed?)
state_ = num.findAll('span', {'class': 'region'})
top_info_dict['state'] = [num.text for num in state_][0]
# Getting the zip-code
zip_code_ = num.findAll('span', {'class': 'postal-code'})
top_info_dict['zip_code'] = [num.text for num in zip_code_][0]
'''Getting the Redfin Estimate. This is important, since if the home
was sold a few months ago, the search should focus on the home's current
value and not on what it sold for. This makes the results far more
efficient.'''
red_est = num.findAll('div', {'class': 'info-block avm'})
for i in red_est:
values_.append(i.div.text)
cats_.append(i.span.text)
# If the Redfin estimate is not available, this is the fall back option.
price_ = num.findAll('div', {'class': 'info-block price'})
for i in price_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bedrooms
bdrs_ = num.findAll('div', {'data-rf-test-id': 'abp-beds'})
for i in bdrs_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bathrooms
bths_ = num.findAll('div', {'data-rf-test-id': 'abp-baths'})
for i in bths_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting size of the home
sqft_ = num.findAll('div', {'data-rf-test-id': 'abp-sqFt'})
for i in sqft_:
top_info_dict['sqft'] = i.span.text[:6]
# Getting the year the home was built in
yrblt_ = num.findAll('div', {'class': 'HomeBottomStats'})
for i in yrblt_:
lbls_ = i.findAll('span', {'class': 'label'})
vals_ = i.findAll('span', {'class': 'value'})
for j in lbls_:
cats_.append(j.text)
for k in vals_:
values_.append(k.text)
# Getting latitude and longitude of the home
lat_lon_ = num.findAll('span', {'itemprop': 'geo'})
for i in lat_lon_:
ll_ = i.findAll('meta')
for num in ll_:
lat_lon.append(num['content'])
if len(lat_lon) >= 2:
top_info_dict['latitude'] = lat_lon[0]
top_info_dict['longitude'] = lat_lon[1]
# Checking to make sure the values are present for the fields
# If they are not available, get rid of them.
values_ = [num for num in values_ if num != '—']
cats_ = [num for num in cats_ if num != '—']
# Putting everything in a dictionary, since it removes redundant columns
info_dict = dict(zip(cats_, values_))
# Merging the two dictionaries
all_info_dict = {**top_info_dict, **info_dict}
# Getting the home description
home_description = soup.find('p', {'class': 'font-b1'})
if home_description is not None:
all_info_dict['description'] = home_description.span.text
else:
all_info_dict['description'] = 'N/A'
return all_info_dict
def public_info_parser(soup):
'''Getting information from tax sources to ensure all the home information
matches from Zillow, Agent, and Tax records.'''
all_info = soup.findAll('div', {'data-rf-test-id': 'publicRecords'})
label_list = []
values_list = []
for num in all_info:
cats = num.findAll('span', {'class': 'table-label'})
for i in cats:
label_list.append(i.text)
for num in all_info:
vals = num.findAll('div', {'class': 'table-value'})
for i in vals:
values_list.append(i.text)
public_info_dict = dict(zip(label_list, values_list))
return public_info_dict
def school_parser(soup):
''' Getting the schools, the grade ranges they serve, and their GreatSchools
score; this will be added as a feature for homes bigger than
three bedrooms and for all single family homes.'''
school_dict = {}
school_info = soup.findAll('div', {'class': "name-and-info"})
school_names = []
school_grades = []
school_ratings = []
for num in school_info:
s_name = num.findAll('div', {'data-rf-test-name': 'school-name'})
s_grade = num.findAll('div', {'class': re.compile('^sub-info')})
s_rating = num.findAll('div', {'class': 'gs-rating-row'})
for i in s_name:
school_names.append(i.text)
for j in s_grade:
school_grades.append(j.text.replace(
' • Serves this home', '').replace(' • ', ' - '))
for k in s_rating:
school_ratings.append(
k.text[-5:].replace(' ', '').replace('/10', ''))
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and ((
('k' in school_grades[w] or 'Pre' in school_grades)
or '5' in school_grades[w]) or 'Elementary' in school_names[w])):
school_dict['elem_school_name'] = school_names[w]
school_dict['elem_school_grades'] = school_grades[
w].split(' - ', 1)[1]
school_dict['elem_school_rating'] = school_ratings[w]
w += 1
else:
w += 1
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and ((
('7' in school_grades[w] or '8' in school_grades)
or 'Middle' in school_names[w]) or 'Junior' in school_names[w])):
school_dict['middle_school_name'] = school_names[w].title()
school_dict['middle_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['middle_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and (
('12' in school_grades or 'High' in school_names[w]))):
school_dict['high_school_name'] = school_names[w].title()
school_dict['high_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['high_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
if 'elem_school_name' not in school_dict.keys():
school_dict['elem_school_name'] = 'N/A'
school_dict['elem_school_grades'] = 'N/A'
school_dict['elem_school_rating'] = 'N/A'
if 'middle_school_name' not in school_dict.keys():
school_dict['middle_school_name'] = 'N/A'
school_dict['middle_school_grades'] = 'N/A'
school_dict['middle_school_rating'] = 'N/A'
if 'high_school_name' not in school_dict.keys():
school_dict['high_school_name'] = 'N/A'
school_dict['high_school_grades'] = 'N/A'
school_dict['high_school_rating'] = 'N/A'
return school_dict
def feats_parser(soup):
'''All the listed features by the agent/broker inputting the listing
on the MLS.'''
all_home_feats = soup.findAll('span', {'class': "entryItemContent"})
feat_cats = []
feat_vals = []
for num in all_home_feats:
feat_cats.append(num.contents[0])
for num in all_home_feats:
feat_vals.append(num.span.text)
cats_set = set(feat_cats)
vals_set = set(feat_vals)
redundant = cats_set & vals_set
for num in redundant:
feat_cats.remove(num)
feat_vals.remove(num)
feat_cats = [str(num) for num in feat_cats]
feat_vals = [str(num) for num in feat_vals]
feats_dict = dict(zip(feat_cats, feat_vals))
extra_feats = []
for k, v in feats_dict.items():
if 'span>' in k:
extra_feats.append(k)
for num in extra_feats:
if num in feats_dict.keys():
feats_dict.pop(num)
# This is to replace all the HTML tags
extra_feats = [num.replace('<span>', '').replace('</span>', '').replace(
'<a href=', '').replace('"', '').replace(' rel=nofollow', '').replace(
' target=_blank>', '').replace('Virtual Tour (External Link)', '').replace(
'</a', '').replace('>', '').replace('&', '&').replace('(s)', '') for num
in extra_feats]
x_feat_string = ', '.join([num for num in extra_feats])
x_feat_string = x_feat_string.split(sep=', ')
x_feat_list = list(set(x_feat_string))
feats_dict['extra_feats'] = ', '.join([num for num in x_feat_list])
return feats_dict
def additional_info(soup):
'''Need to get additional information, so we don't miss anything that
could prove to be critical later.'''
cats_ = soup.findAll('span', {'class': re.compile('^header ')})
cats_ = [num.text for num in cats_]
vals_ = soup.findAll('span', {'class': re.compile('^content ')})
vals_ = [num.text for num in vals_]
cats_ = [str(num).replace('Property Type', 'prop_type').replace(
'HOA Dues', 'hoa_fees').replace('Type', 'prop_type') for num in cats_]
vals_ = [str(num).replace('$', '').replace('/month', '').replace(
'Hi-Rise', 'Condo').replace('Residential', 'Single Family Residence')
for num in vals_]
return dict(zip(cats_, vals_))
def info_from_property(soup):
''' Putting all the information together in a Dataframe and removing any
duplicate columns.'''
top_info_dict = top_info_parser(soup)
public_info_dict = public_info_parser(soup)
school_dict = school_parser(soup)
all_home_feats = feats_parser(soup)
mid_info_feats = additional_info(soup)
df1 = pd.DataFrame(top_info_dict, index=[1])
df2 = pd.DataFrame(public_info_dict, index=[1])
df3 = pd.DataFrame(school_dict, index=[1])
df4 = pd.DataFrame(all_home_feats, index=[1])
df5 = pd.DataFrame(mid_info_feats, index=[1])
df = pd.DataFrame()
df = pd.concat([df1, df2, df3, df4, df5], axis=1)
df.columns = rename_columns(df.columns)
all_dict = df.to_dict()
new_df =
|
pd.DataFrame(all_dict)
|
pandas.DataFrame
|
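# A tiny illustration of the dict round-trip used in info_from_property above
# to drop duplicate columns after the axis=1 concat (toy one-row frames,
# illustrative only): converting to a dict keeps a single entry per column
# name (pandas warns that non-unique columns are partly omitted).
import pandas as pd

combined = pd.concat([pd.DataFrame({"beds": [3]}, index=[1]),
                      pd.DataFrame({"beds": [3], "baths": [2]}, index=[1])], axis=1)
print(list(combined.columns))               # ['beds', 'beds', 'baths']
deduped = pd.DataFrame(combined.to_dict())  # duplicate 'beds' collapses to one column
print(list(deduped.columns))                # ['beds', 'baths']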
import pandas as pd
from patternsum.data import DataManager
from patternsum.pattern import Pattern
from patternsum.optimizer import Optimizer
from .hook import NoNewSpeciesHook
class PatternSummarization:
def __init__(self, data, population_size, n_survivors, prob_mutate=0.9, prob_mutate_add=0.20,
prob_mutate_merge=0.30, prob_mutate_split=0.30, prob_mutate_drop=0.20,
join_thresh=0.6, alpha=2, beta=1,
n_best=None, min_acc=0.25, conv_thresh=1, min_conv_size=2,
n_pools=None, random_seed=None):
self.data_manager = DataManager(data=pd.Series(data).reset_index(drop=True))
self.n_best = n_best
self.min_acc = min_acc
self.conv_thresh = conv_thresh
self.min_conv_size = min_conv_size
self.n_pools = n_pools
self.random_seed = random_seed
self.optimizer = Optimizer(data_manager=self.data_manager, population_size=population_size,
n_survivors=n_survivors, tightness_alpha=alpha, tightness_beta=beta,
join_thresh=join_thresh, prob_mutate=prob_mutate,
prob_mutate_add=prob_mutate_add, prob_mutate_merge=prob_mutate_merge,
prob_mutate_split=prob_mutate_split, prob_mutate_drop=prob_mutate_drop,
n_pools=self.n_pools, random_seed=self.random_seed)
@property
def n_survivors(self):
return self.optimizer.n_survivors
@property
def population_size(self):
return self.optimizer.population_size
def _initialize(self):
self.optimizer.set_random_seed(self.random_seed)
self.optimizer.initialize()
def evolve(self, generations=1, reset=False, n_no_new_species=None):
if reset or self.optimizer.generation == 0:
self._initialize()
hooks = []
if n_no_new_species is not None:
hooks.append(NoNewSpeciesHook(n_no_new_species))
for _ in range(generations):
self.optimizer.step()
for hook in hooks:
codes = hook.callback(self.optimizer)
for code in codes:
if code == 'end':
return
def get_patterns(self):
species = filter(lambda s: s.convergence >= self.conv_thresh, self.optimizer.species[:self.n_best])
species = filter(lambda s: s.convergence_size >= self.min_conv_size, species)
patterns = map(lambda s: s.ancestor.copy(), species)
patterns = filter(lambda p: p.accuracy >= self.min_acc, patterns)
patterns = tuple(patterns)
return patterns
def get_species_report(self):
result = []
for species in self.optimizer.species:
ancestor = species.ancestor # type: Pattern
report = {
'species': species,
'ancestor': ancestor,
'species_fitness': species.fitness,
'species_size': len(species),
'convergence': species.convergence,
'convergence_size': species.convergence_size,
'fitness': ancestor.fitness,
'accuracy': ancestor.accuracy,
'tightness': ancestor.tightness,
}
result.append(report)
return
|
pd.DataFrame(result)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
#import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.ticker import PercentFormatter
import logging
from datetime import datetime
from antalla.db import session
from antalla import models
def get_exchange_trades(buy_sym_id, sell_sym_id, exchange):
query = ("select exchange_trade_id, name, timestamp, trade_type, buy_sym_id, sell_sym_id, price,"+
" size from trades inner join exchanges on exchanges.id = trades.exchange_id" +
" where name = '" + exchange.lower() +"' and buy_sym_id = '" + buy_sym_id.upper() +
"' and sell_sym_id = '" + sell_sym_id.upper() + "' order by timestamp asc")
print(query)
return session.execute(query)
def parse_raw_trades(raw_trades):
trades = []
for trade in list(raw_trades):
trades.append(dict(
exchange=trade[1],
timestamp=trade[2],
type=trade[3],
buy_sym_id=trade[4],
sell_sym_id=trade[5],
price=trade[6],
size=trade[7],
volume=float(trade[6]*trade[7])
))
return trades
def plot_hourly_trade_vol(buy_sym_id, sell_sym_id, exchanges):
t = []
formatter = DateFormatter('%Y-%m-%d %H:%M')
fig, ax = plt.subplots()
for exchange in exchanges:
raw_trades = get_exchange_trades(buy_sym_id, sell_sym_id, exchange)
parsed_trades = parse_raw_trades(raw_trades)
plot_individual_trades(parsed_trades, ax, exchange)
ax.set(xlabel="Timestamp", ylabel="Trade volume ("+buy_sym_id+")",
title="Sum of Trade Volume Per Hour: "+buy_sym_id+"-"+sell_sym_id)
ax.xaxis.set_major_formatter(formatter)
ax.grid()
ax.legend()
plt.xticks(rotation="vertical")
plt.show()
def plot_individual_trades(all_trades, ax, exchange):
trades = list(filter(lambda x: x["exchange"] == exchange, all_trades))
times = list(map(lambda x: x["timestamp"], trades))
volumes = list(map(lambda x: x["volume"], trades))
df = pd.DataFrame()
df["timestamp"] = pd.to_datetime(times)
df["volumes"] = volumes
df.index = df["timestamp"]
df_bins = df.resample('H').sum()
ax.plot(df_bins.index, df_bins["volumes"], label=exchange, linewidth=2)
def parse_market(raw_market, sym_1, sym_2):
"""
>>> raw_market = 'BTCETH'
>>> parse_market(raw_market, 'ETH', 'BTC')
('BTC', 'ETH')
>>> parse_market("QTMBTC", 'QTM', 'BTC')
('QTM', 'BTC')
"""
if sym_1 not in raw_market or sym_2 not in raw_market:
return None
elif raw_market[0:len(sym_1)] == sym_1:
return (sym_1, sym_2)
else:
return (sym_2, sym_1)
def is_original_market(buy_sym_id, sell_sym_id, exchange):
query = (
"select name, first_coin_id, second_coin_id, original_name from exchange_markets inner join exchanges on exchanges.id" +
" = exchange_markets.exchange_id where name = '" + exchange.lower() + "'"
)
all_markets = session.execute(query)
for row in list(all_markets):
if row[1] == buy_sym_id and row[2] == sell_sym_id:
market = parse_market(row[3], buy_sym_id, sell_sym_id)
return market
elif row[1] == sell_sym_id and row[2] == buy_sym_id:
market = parse_market(row[3], sell_sym_id, buy_sym_id)
return market
return None
def invert_trades(raw_trades):
trades = []
for trade in list(raw_trades):
modified_trade = dict(
exchange=trade[1],
timestamp=trade[2],
)
if trade[3] == "sell":
modified_trade["type"]="sell"
else:
modified_trade["type"]="buy"
modified_trade["buy_sym_id"]=trade[5]
modified_trade["sell_sym_id"]=trade[4]
price = float(1.0/trade[6])
modified_trade["price"]=price
modified_trade["size"]=float((trade[6]*trade[7])*(float(1.0/trade[6])))*trade[6]
modified_trade["volume"]=float((trade[6]*trade[7])*(float(1.0/trade[6])))
trades.append(modified_trade)
return trades
def plot_volume_bar_chart(buy_sym_id, sell_sym_id, exchange, plot_id):
# check if market is original market
original_market = is_original_market(buy_sym_id, sell_sym_id, exchange)
trades = []
if original_market is None:
return
elif original_market[0] == sell_sym_id and original_market[1] == buy_sym_id:
raw_trades = get_exchange_trades(sell_sym_id, buy_sym_id, exchange)
trades = invert_trades(raw_trades)
else:
raw_trades = get_exchange_trades(buy_sym_id, sell_sym_id, exchange)
trades = parse_raw_trades(raw_trades)
times = list(map(lambda x: x["timestamp"], trades))
volumes = list(map(lambda x: x["volume"], trades))
prices = list(map(lambda x: x["price"], trades))
sell_volume = list(map(lambda x: x["volume"] if x["type"] == "sell" else 0, trades))
buy_volume = list(map(lambda x: x["volume"] if x["type"] == "buy" else 0, trades))
sells = list(filter(lambda x: x["type"] == "sell", trades))
sell_times = list(map(lambda x: x["timestamp"], sells))
sell_prices = list(map(lambda x: x["price"], sells))
buys = list(filter(lambda x: x["type"] == "buy", trades))
buy_times = list(map(lambda x: x["timestamp"], buys))
buy_prices = list(map(lambda x: x["price"], buys))
fig, axs = plt.subplots(2, sharex=True)
df = pd.DataFrame()
df["timestamp"] = pd.to_datetime(times)
df["volumes"] = volumes
df["prices"] = prices
df["sell_volume"] = sell_volume
df["buy_volume"] = buy_volume
df.index = df["timestamp"]
# bin trades in 1 hour bins
df_bins = df.resample('30T').sum()
p1 = axs[0].bar(df_bins.index, df_bins["sell_volume"], width=0.01, color="steelblue")
p2 = axs[0].bar(df_bins.index, df_bins["buy_volume"], width=0.01, color="tomato")
formatter = DateFormatter('%Y-%m-%d %H:%M')
axs[0].set(title=exchange+": Traded Volume and Price per Hour: " + buy_sym_id + "-" + sell_sym_id)
axs[0].xaxis.set_major_formatter(formatter)
axs[0].set_ylabel("Total trade volume ("+buy_sym_id.upper()+")")
axs[0].grid()
axs[0].legend((p1[0], p2[0]), ("Sell volume", "Buy volume"))
plt.xticks(rotation="vertical")
plt.figure(plot_id)
if len(sells) == 0 or len(buys) == 0:
plt.xticks(rotation="vertical")
plt.figure(plot_id)
return
df_sells = pd.DataFrame()
df_sells["timestamp"] = sell_times
df_sells["prices"] = sell_prices
df_sells.index = df_sells["timestamp"]
sell_bins = df_sells.resample('30T').mean().fillna(0)
# avg sell price
ps = axs[1].plot(sell_bins.index, sell_bins["prices"])
df_buys =
|
pd.DataFrame()
|
pandas.DataFrame
|
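# Numeric sanity check of the market inversion performed by invert_trades
# above: a trade of size 2 at price 0.05 (quote per base) becomes, in the
# flipped market, price 1/0.05 = 20, size 2*0.05 = 0.1 and volume 2. The toy
# numbers are illustrative only.
p, s = 0.05, 2.0
assert abs(1.0 / p - 20.0) < 1e-9                   # inverted price
assert abs(((p * s) * (1.0 / p)) * p - 0.1) < 1e-9  # inverted size, as computed above
assert abs((p * s) * (1.0 / p) - 2.0) < 1e-9        # inverted volume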
"""
Tests for the construct_estimator.py file.
"""
import unittest
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.asym_logit as asym
import pylogit.conditional_logit as mnl
import pylogit.clog_log as clog
import pylogit.scobit as scobit
import pylogit.uneven_logit as uneven
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
import pylogit.construct_estimator as constructor
class ConstructorTests(unittest.TestCase):
def make_asym_model(self):
# The setup being used is one where there are two choice situations,
# the first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative-specific
# constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
natural_shapes = asym._convert_eta_to_c(fake_shapes,
fake_shape_ref_pos)
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# Get the mappping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"shape_ref_pos": fake_shape_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
|
pd.Series(fake_intercepts, index=fake_intercept_names)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""Converter for CGNC."""
import logging
from typing import Iterable
import pandas as pd
from pyobo.struct import Obo, Reference, Term, from_species
from pyobo.utils.path import ensure_df
PREFIX = "cgnc"
URL = "http://birdgenenames.org/cgnc/downloads.jsp?file=standard"
logger = logging.getLogger(__name__)
def get_obo(force: bool = False) -> Obo:
"""Get CGNC as OBO."""
return Obo(
iter_terms=get_terms,
iter_terms_kwargs=dict(force=force),
name="CGNC",
ontology=PREFIX,
typedefs=[from_species],
auto_generated_by=f"bio2obo:{PREFIX}",
)
HEADER = [
"cgnc_id",
"ncbigene_id",
"ensembl_id",
"name",
"synonym_1",
"synonym_2",
"curation status",
"last edit date",
]
def get_terms(force: bool = False) -> Iterable[Term]:
"""Get CGNC terms."""
df = ensure_df(PREFIX, url=URL, name=f"{PREFIX}.tsv", force=force, header=0, names=HEADER)
for i, (cgnc_id, entrez_id, ensembl_id, name, synonym_1, synoynm_2, _, _) in enumerate(
df.values
):
if pd.isna(cgnc_id):
logger.warning(f"row {i} CGNC ID is none")
continue
try:
int(cgnc_id)
except ValueError:
logger.warning(f"row {i} CGNC ID is not int-like: {cgnc_id}")
continue
term = Term.from_triple(
prefix=PREFIX,
identifier=cgnc_id,
name=name,
)
term.set_species(identifier="9031", name="Gallus gallus")
if entrez_id and pd.notna(entrez_id):
term.append_xref(Reference(prefix="ncbigene", identifier=entrez_id))
if pd.notna(ensembl_id):
term.append_xref(Reference(prefix="ensembl", identifier=ensembl_id))
if synonym_1 and pd.notna(synonym_1):
term.append_synonym(synonym_1)
if synoynm_2 and
|
pd.notna(synoynm_2)
|
pandas.notna
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper module for performing MTSL GWAS inside of MTMLSEM model.
Note that this has little to do with MTMLSEM, it merely fits the classical LMM
model of the kind:
Y = X B + E + U,
where Y and X are deterministic data matrices, B is a matrix of regression
coefficients, and E and U are random matrices, with U being the random-effect
matrix that takes genetic kinship between individuals into account.
"""
from itertools import combinations, product
from tqdm import tqdm
import pandas as pd
import numpy as np
from utils import translate_names, unique_mapping
def gwas_lmm(Model, y: list[str], phenos, genes, desc='', init_args=None,
fit_args=None, dropna=True, verbose=True):
"""
Multi-trait single-locus GWAS via linear (possibly mixed) model.
Parameters
----------
Model : class
semopy class.
y : list[str]
List of phenotype names.
phenos : pd.DataFrame
Phenotypes + possibly other variables.
genes : pd.DataFrame
Genotypes/SNPs.
desc : str, optional
Extra model description. The default is ''.
init_args : dict, optional
Extra arguments for Model constructor. The default is None.
fit_args : dict, optional
Extra arguments for Model fit method (e.g., k). The default is None.
dropna : bool, optional
If True, then NaN rows are dropped for each gene test. The default is
True.
Returns
-------
pd.DataFrame
GWAS results.
"""
if init_args is None:
init_args = dict()
if fit_args is None:
fit_args = dict()
res = list()
desc= desc + '\n{} ~ snp'.format(', '.join(y))
for a, b in combinations(y, 2):
desc += f'\n{a} ~~ {b}'
m = Model(desc, **init_args)
phenos = phenos.copy()
it = genes.iteritems()
if verbose:
it = tqdm(list(it))
for name, gene in it:
chr, pos = translate_names(name)
phenos['snp'] = gene.values
if dropna:
data = phenos.dropna()
else:
data = phenos
try:
r = m.fit(data, clean_slate=True, **fit_args)
if type(r) is not tuple:
succ = r.success
fun = r.fun
else:
succ = r[0].success & r[1].success
fun = r[1].fun
except np.linalg.LinAlgError:
succ = False
if not succ:
t = [name, chr, pos, float('nan')] + [1.0] * len(y)
t += [float('nan')] * len(y)
res.append(t)
else:
ins = m.inspect()
ins = ins[(ins['rval'] == 'snp') & (ins['op'] == '~')]
pvals = list()
ests = list()
for _, row in ins.iterrows():
pvals.append(row['p-value'])
ests.append(row['Estimate'])
res.append([name, chr, pos, fun] + pvals + ests)
cols = ['SNP', 'chr', 'pos'] + [f'{p}_p-value' for p in y] + [f'{p}_b'
for p in y]
return pd.DataFrame(res, columns=cols)
def gwas_w(lt):
gs, lt = lt
mod, y, phenos, genes, desc, init_args, fit_args = lt
return gwas(mod, y, phenos, genes[gs], desc, init_args, fit_args,
verbose=False)
def gwas(Model, y: list[str], phenos, genes, desc='', init_args=None,
fit_args=None, num_processes=-1, chunk_size=1000, verbose=True):
"""
Multi-trait single-locus GWAS with multiprocessing support.
Parameters
----------
Model : class
semopy class.
y : list[str]
List of phenotype names.
phenos : pd.DataFrame
Phenotypes + possibly other variables.
genes : pd.DataFrame
Genotypes/SNPs.
desc : str, optional
Extra model description. The default is ''.
init_args : dict, optional
Extra arguments for Model constructor. The default is None.
fit_args : dict, optional
Extra arguments for Model fit method (e.g., k). The default is None.
num_processes : int, optional
Number of processes to run. If -1, then it is selected to number of
avaialable CPU cores minus 1. "None" is the same as 1. The default is
-1.
chunk_size : int, optional
Number of SNPs to be sent onto a single process. The default is 1000.
verbose : bool, optional
If False, then no progress bar will be printed. The default is True.
Returns
-------
pd.DataFrame
GWAS results.
"""
from multiprocessing import Pool, cpu_count
from tqdm.contrib.concurrent import process_map
if num_processes == -1:
num_processes = cpu_count() - 1
if num_processes in (None, 0, 1):
return gwas_lmm(Model, y, phenos, genes, desc, init_args, fit_args,
verbose=verbose)
# We rule out duplicate SNPs to ease the computational burden:
unique = unique_mapping(genes)
genes = genes[list(unique.keys())]
result = None
lt = list(genes.columns)
lt2 = [lt[i:i+chunk_size] for i in range(0, len(lt), chunk_size)]
lt = (Model, y, phenos, genes, desc, init_args, fit_args)
prod = product(lt2, lt)
if not verbose:
with Pool(num_processes) as p:
for t in p.map(gwas_w, prod):
if result is None:
result = t
else:
result =
|
pd.concat([result, t])
|
pandas.concat
|
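# A tiny numerical sketch of the fixed-effect part of the model described in
# the module docstring above (Y = X B + E), deliberately ignoring the kinship
# random effect U; all shapes and values are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(1)
n, p, k = 200, 3, 2  # individuals, predictors, traits
X = rng.normal(size=(n, p))
B = np.array([[0.5, -1.0], [0.0, 2.0], [1.5, 0.3]])
Y = X @ B + rng.normal(scale=0.1, size=(n, k))
B_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)  # recovers B up to noise
print(np.round(B_hat, 2))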
# Copyright 2021 <NAME>, <NAME>, <NAME>.
# Licensed under the BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
# This file may not be copied, modified, or distributed
# except according to those terms.
import sys
sys.stderr = open(snakemake.log[0], "w")
import altair as alt
import pandas as pd
import pysam
from intervaltree import IntervalTree
# read primer bedpe to df
PRIMER = pd.read_csv(snakemake.params.get("bed", ""), delimiter="\t", header=None)
PRIMER.drop(PRIMER.columns[[0, 3]], axis=1, inplace=True)
PRIMER.columns = ["p1_start", "p1_end", "p2_start", "p2_end"]
# convert df to interval trees
primer_intervals = IntervalTree()
no_primer_intervals = IntervalTree()
for index, row in PRIMER.iterrows():
primer_intervals[row["p1_start"] : row["p2_end"] + 1] = (
row["p1_start"],
row["p2_end"] + 1,
)
no_primer_intervals[row["p1_end"] + 1 : row["p2_start"]] = (
row["p1_end"] + 1,
row["p2_start"],
)
def iter_with_samples(inputfiles):
return zip(snakemake.params.samples, inputfiles)
def count_intervals(file):
with pysam.AlignmentFile(file, "rb") as bam:
counter_primer = 0
counter_no_primer = 0
counter_primer_within = 0
counter_no_primer_within = 0
counter_nothing = 0
mate_pair_intervals = {}
for read in bam.fetch():
if not mate_pair_intervals.get(read.query_name):
mate_pair_intervals[read.query_name] = [read.reference_start]
else:
mate_pair_intervals[read.query_name].append(read.reference_end)
for pair in mate_pair_intervals:
if (
len(mate_pair_intervals[pair]) > 1
and mate_pair_intervals[pair][0] != None
and mate_pair_intervals[pair][1] != None
):
if primer_intervals.envelop(
mate_pair_intervals[pair][0], mate_pair_intervals[pair][1] + 1
):
if (
sorted(
primer_intervals.envelop(
mate_pair_intervals[pair][0],
mate_pair_intervals[pair][1] + 1,
)
)[0].begin
== mate_pair_intervals[pair][0]
and sorted(
primer_intervals.envelop(
mate_pair_intervals[pair][0],
mate_pair_intervals[pair][1] + 1,
)
)[0].end
== mate_pair_intervals[pair][1] + 1
):
counter_primer += 1
else:
counter_primer_within += 1
elif no_primer_intervals.envelop(
mate_pair_intervals[pair][0] + 1, mate_pair_intervals[pair][1]
):
if (
sorted(
no_primer_intervals.envelop(
mate_pair_intervals[pair][0] + 1,
mate_pair_intervals[pair][1],
)
)[0].begin
== mate_pair_intervals[pair][0] + 1
and sorted(
no_primer_intervals.envelop(
mate_pair_intervals[pair][0] + 1,
mate_pair_intervals[pair][1],
)
)[0].end
== mate_pair_intervals[pair][1]
):
counter_no_primer += 1
else:
counter_no_primer_within += 1
else:
counter_nothing += 1
else:
counter_nothing += 1
counters = pd.DataFrame(
{
"n_count": [
counter_primer,
counter_primer_within,
counter_no_primer,
counter_no_primer_within,
counter_nothing,
],
"class": [
"uncut primer exact",
"uncut primer within",
"cut primer exact",
"cut primer within",
"no mathing win",
],
}
)
return counters
def plot_classes(counters):
bars = (
alt.Chart(counters)
.mark_bar()
.encode(
y="class",
x="n_count",
row=alt.Row("sample:N"),
column=alt.Column("state:N", sort="descending"),
)
)
text = bars.mark_text(
align="left",
baseline="middle",
dx=3, # Nudges text to right so it doesn't appear on top of the bar
).encode(text="n_count", row=alt.Row("sample:N"), column=alt.Column("state:N"))
return bars, text
all_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
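# Minimal illustration of the IntervalTree.envelop() query that the counting
# logic above relies on: it returns the stored intervals that lie completely
# inside the query range (toy coordinates, illustrative only).
from intervaltree import IntervalTree

tree = IntervalTree()
tree[10:20] = "amplicon A"
tree[30:45] = "amplicon B"
print(sorted(tree.envelop(8, 25)))   # [Interval(10, 20, 'amplicon A')]
print(sorted(tree.envelop(12, 18)))  # [] -- the query must fully contain an interval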
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
from mars.executor import register, Executor
from mars.tensor.core import TensorOrder
from mars.tensor.datasource import ArrayDataSource
from mars.tiles import get_tiled
from mars.session import new_session, Session
class Test(unittest.TestCase):
def setUp(self):
new_session().as_default()
def testSessionExecute(self):
a = mt.random.rand(10, 20)
res = a.sum().to_numpy()
self.assertTrue(np.isscalar(res))
self.assertLess(res, 200)
def testSessionAsyncExecute(self):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
self.assertEqual(expected, res)
res = a.sum().execute(wait=False)
res = res.result().fetch()
self.assertEqual(expected, res)
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(expected, res)
t = [df.sum(), a.sum()]
res = mt.ExecutableTuple(t).to_object(wait=False).result()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
res = mt.ExecutableTuple(t).execute(wait=False)
res = res.result().fetch()
pd.testing.assert_series_equal(raw_df.sum(), res[0])
self.assertEqual(raw_a.sum(), res[1])
def testMultipleOutputExecute(self):
data = np.random.random((5, 9))
# test multiple outputs
arr1 = mt.tensor(data.copy(), chunk_size=3)
result = mt.modf(arr1).execute().fetch()
expected = np.modf(data)
np.testing.assert_array_equal(result[0], expected[0])
np.testing.assert_array_equal(result[1], expected[1])
# test 1 output
arr2 = mt.tensor(data.copy(), chunk_size=3)
result = ((arr2 + 1) * 2).to_numpy()
expected = (data + 1) * 2
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
arr3 = mt.tensor(data.copy(), chunk_size=3)
arrs = mt.split(arr3, 3, axis=1)
result = arrs[0].to_numpy()
expected = np.split(data, 3, axis=1)[0]
np.testing.assert_array_equal(result, expected)
# test multiple outputs, but only execute 1
data = np.random.randint(0, 10, (5, 5))
arr3 = (mt.tensor(data) + 1) * 2
arrs = mt.linalg.qr(arr3)
result = (arrs[0] + 1).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 1
np.testing.assert_array_almost_equal(result, expected)
result = (arrs[0] + 2).to_numpy()
expected = np.linalg.qr((data + 1) * 2)[0] + 2
np.testing.assert_array_almost_equal(result, expected)
s = mt.shape(0)
result = s.execute().fetch()
expected = np.shape(0)
self.assertEqual(result, expected)
def testReExecuteSame(self):
data = np.random.random((5, 9))
# test run the same tensor
arr4 = mt.tensor(data.copy(), chunk_size=3) + 1
result1 = arr4.to_numpy()
expected = data + 1
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test run the same tensor with single chunk
arr4 = mt.tensor(data.copy())
result1 = arr4.to_numpy()
expected = data
np.testing.assert_array_equal(result1, expected)
result2 = arr4.to_numpy()
np.testing.assert_array_equal(result1, result2)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr4).chunks[0].key] = data + 2
result3 = arr4.to_numpy()
np.testing.assert_array_equal(result3, data + 2)
# test run same key tensor
arr5 = mt.ones((10, 10), chunk_size=3)
result1 = arr5.to_numpy()
del arr5
arr6 = mt.ones((10, 10), chunk_size=3)
result2 = arr6.to_numpy()
np.testing.assert_array_equal(result1, result2)
# test copy, make sure it does not cause the execution cache to be missed
df = md.DataFrame(mt.ones((10, 3), chunk_size=5))
executed = [False]
def add_one(x):
if executed[0]: # pragma: no cover
raise ValueError('executed before')
return x + 1
df2 = df.apply(add_one)
pd.testing.assert_frame_equal(df2.to_pandas(), pd.DataFrame(np.ones((10, 3)) + 1))
executed[0] = True
df3 = df2.copy()
df4 = df3 * 2
pd.testing.assert_frame_equal(df4.to_pandas(), pd.DataFrame(np.ones((10, 3)) * 4))
def testExecuteBothExecutedAndNot(self):
data = np.random.random((5, 9))
arr1 = mt.tensor(data, chunk_size=4) * 2
arr2 = mt.tensor(data) + 1
np.testing.assert_array_equal(arr2.to_numpy(), data + 1)
# modify result
sess = Session.default_or_local()
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr2).chunks[0].key] = data + 2
results = sess.run(arr1, arr2)
np.testing.assert_array_equal(results[0], data * 2)
np.testing.assert_array_equal(results[1], data + 2)
def testTensorExecuteNotFetch(self):
data = np.random.random((5, 9))
sess = Session.default_or_local()
arr1 = mt.tensor(data, chunk_size=2) * 2
with self.assertRaises(ValueError):
sess.fetch(arr1)
self.assertIs(arr1.execute(), arr1)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr1).chunks[0].key] = data[:2, :2] * 3
expected = data * 2
expected[:2, :2] = data[:2, :2] * 3
np.testing.assert_array_equal(arr1.to_numpy(), expected)
def testDataFrameExecuteNotFetch(self):
data1 = pd.DataFrame(np.random.random((5, 4)), columns=list('abcd'))
sess = Session.default_or_local()
df1 = md.DataFrame(data1, chunk_size=2)
with self.assertRaises(ValueError):
sess.fetch(df1)
self.assertIs(df1.execute(), df1)
self.assertEqual(len(df1[df1['a'] > 1].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
self.assertEqual(len(df1[df1['a'] > 1]['a'].to_pandas(fetch_kwargs={'batch_size': 2})), 0)
# modify result
executor = sess._sess._executor
executor.chunk_result[get_tiled(df1).chunks[0].key] = data1.iloc[:2, :2] * 3
expected = data1
expected.iloc[:2, :2] = data1.iloc[:2, :2] * 3
pd.testing.assert_frame_equal(df1.to_pandas(), expected)
pd.testing.assert_frame_equal(df1.to_pandas(fetch_kwargs={'batch_size': 2}), expected)
def testClosedSession(self):
session = new_session()
arr = mt.ones((10, 10))
result = session.run(arr)
np.testing.assert_array_equal(result, np.ones((10, 10)))
session.close()
with self.assertRaises(RuntimeError):
session.run(arr)
with self.assertRaises(RuntimeError):
session.run(arr + 1)
def testBoolIndexing(self):
arr = mt.random.rand(10, 10, chunk_size=5)
arr[3:8, 3:8] = mt.ones((5, 5))
arr2 = arr[arr == 1]
self.assertEqual(arr2.shape, (np.nan,))
arr2.execute()
self.assertEqual(arr2.shape, (25,))
arr3 = arr2.reshape((5, 5))
expected = np.ones((5, 5))
np.testing.assert_array_equal(arr3.to_numpy(), expected)
def testArrayProtocol(self):
arr = mt.ones((10, 20))
result = np.asarray(arr)
np.testing.assert_array_equal(result, np.ones((10, 20)))
arr2 = mt.ones((10, 20))
result = np.asarray(arr2, mt.bool_)
np.testing.assert_array_equal(result, np.ones((10, 20), dtype=np.bool_))
arr3 = mt.ones((10, 20)).sum()
result = np.asarray(arr3)
np.testing.assert_array_equal(result, np.asarray(200))
arr4 = mt.ones((10, 20)).sum()
result = np.asarray(arr4, dtype=np.float_)
np.testing.assert_array_equal(result, np.asarray(200, dtype=np.float_))
def testRandomExecuteInSessions(self):
arr = mt.random.rand(20, 20)
sess1 = new_session()
res1 = sess1.run(arr)
sess2 = new_session()
res2 = sess2.run(arr)
np.testing.assert_array_equal(res1, res2)
def testFetch(self):
sess = new_session()
arr1 = mt.ones((10, 5), chunk_size=3)
r1 = sess.run(arr1)
r2 = sess.run(arr1)
np.testing.assert_array_equal(r1, r2)
executor = sess._sess._executor
executor.chunk_result[get_tiled(arr1).chunks[0].key] = np.ones((3, 3)) * 2
r3 = sess.run(arr1 + 1)
np.testing.assert_array_equal(r3[:3, :3], np.ones((3, 3)) * 3)
# rerun to ensure arr1's chunk results still exist
r4 = sess.run(arr1 + 1)
np.testing.assert_array_equal(r4[:3, :3], np.ones((3, 3)) * 3)
arr2 = mt.ones((10, 5), chunk_size=3)
r5 = sess.run(arr2)
np.testing.assert_array_equal(r5[:3, :3], np.ones((3, 3)) * 2)
r6 = sess.run(arr2 + 1)
np.testing.assert_array_equal(r6[:3, :3], np.ones((3, 3)) * 3)
df = md.DataFrame(np.random.rand(10, 2), columns=list('ab'))
s = df['a'].map(lambda x: np.ones((3, 3)), dtype='object').sum()
np.testing.assert_array_equal(s.execute().fetch(), np.ones((3, 3)) * 10)
# test fetch multiple tensors
raw = np.random.rand(5, 10)
arr1 = mt.ones((5, 10), chunk_size=5)
arr2 = mt.tensor(raw, chunk_size=3)
arr3 = mt.sum(arr2)
sess.run(arr1, arr2, arr3)
fetch1, fetch2, fetch3 = sess.fetch(arr1, arr2, arr3)
np.testing.assert_array_equal(fetch1, np.ones((5, 10)))
np.testing.assert_array_equal(fetch2, raw)
np.testing.assert_almost_equal(fetch3, raw.sum())
fetch1, fetch2, fetch3 = sess.fetch([arr1, arr2, arr3])
np.testing.assert_array_equal(fetch1, np.ones((5, 10)))
np.testing.assert_array_equal(fetch2, raw)
np.testing.assert_almost_equal(fetch3, raw.sum())
raw = np.random.rand(5, 10)
arr = mt.tensor(raw, chunk_size=5)
s = arr.sum()
self.assertAlmostEqual(s.execute().fetch(), raw.sum())
def _execute_ds(*_): # pragma: no cover
raise ValueError('cannot run random again')
try:
register(ArrayDataSource, _execute_ds)
self.assertAlmostEqual(s.fetch(), raw.sum())
finally:
del Executor._op_runners[ArrayDataSource]
def testDecref(self):
sess = new_session()
arr1 = mt.ones((10, 5), chunk_size=3)
arr2 = mt.ones((10, 5), chunk_size=3)
sess.run(arr1)
sess.run(arr2)
sess.fetch(arr1)
executor = sess._sess._executor
self.assertEqual(len(executor.chunk_result), 4) # 4 kinds of shapes
del arr1
self.assertEqual(len(executor.chunk_result), 4)
del arr2
self.assertEqual(len(executor.chunk_result), 0)
def testWithoutCompose(self):
sess = new_session()
arr1 = (mt.ones((10, 10), chunk_size=3) + 1) * 2
r1 = sess.run(arr1)
arr2 = (mt.ones((10, 10), chunk_size=4) + 1) * 2
r2 = sess.run(arr2, compose=False)
np.testing.assert_array_equal(r1, r2)
def testDataFrameCreate(self):
sess = new_session()
tensor = mt.ones((2, 2))
df = md.DataFrame(tensor)
df_result = sess.run(df)
df2 = md.DataFrame(df)
df2 = sess.run(df2)
np.testing.assert_equal(df_result.values, np.ones((2, 2)))
pd.testing.assert_frame_equal(df_result, df2)
raw_a = np.random.rand(10)
raw_b = np.random.randint(1000, size=10)
df = md.DataFrame({'a': mt.tensor(raw_a), 'b': mt.tensor(raw_b)}, columns=['b', 'a'])
df_result = sess.run(df)
pd.testing.assert_frame_equal(
df_result, pd.DataFrame({'a': raw_a, 'b': raw_b}, columns=['b', 'a']))
def testDataFrameTensorConvert(self):
# test from_tensor(), from_dataframe(), to_tensor(), to_dataframe()
sess = new_session()
tensor = mt.ones((2, 2))
df = tensor.to_dataframe()
np.testing.assert_equal(sess.run(df), np.ones((2, 2)))
tensor2 = mt.from_dataframe(df)
np.testing.assert_equal(sess.run(tensor2), np.ones((2, 2)))
tensor3 = tensor2.from_dataframe(df)
np.testing.assert_equal(sess.run(tensor3), np.ones((2, 2)))
tensor4 = df.to_tensor()
np.testing.assert_equal(sess.run(tensor4), np.ones((2, 2)))
df = md.dataframe_from_tensor(tensor3)
np.testing.assert_equal(sess.run(df).values, np.ones((2, 2)))
df = df.from_tensor(tensor3)
np.testing.assert_equal(sess.run(df).values, np.ones((2, 2)))
# test raise error exception
with self.assertRaises(TypeError):
md.dataframe_from_tensor(mt.ones((1, 2, 3)))
# test exception
tensor = md.dataframe_from_tensor(mt.array([1, 2, 3]))
np.testing.assert_equal(sess.run(tensor), np.array([1, 2, 3]).reshape(3, 1))
def testDataFrameExecution(self):
sess = new_session()
raw = pd.DataFrame(np.random.rand(5, 3),
index=pd.date_range('2020-1-1', periods=5))
for chunk_size in (3, 5):
df = md.DataFrame(raw, chunk_size=chunk_size)
r = df.loc['2020-1-2']
result = sess.run(r)
pd.testing.assert_series_equal(result, raw.loc['2020-1-2'])
df = md.DataFrame(raw, chunk_size=chunk_size)
df2 = df[[0, 2]].dropna().head(4).copy()
df3 = df2[df2[0] > 0.5]
result = sess.run(df3)
expected = raw[[0, 2]].dropna().head(4).copy()
expected = expected[expected[0] > 0.5]
pd.testing.assert_frame_equal(result, expected)
@unittest.skipIf(pa is not None, 'this test aims to test usage of ArrowDtype '
'when pyarrow not installed')
def testDataFrameWithArrowDtypeExecution(self):
sess = new_session()
# test ArrowDtype when pyarrow not installed
raw = pd.DataFrame({'a': [f's{i}' for i in range(10)],
'b': np.random.rand(10)})
df = md.DataFrame(raw, chunk_size=5)
df['a'] = df['a'].astype('Arrow[string]')
r = df.groupby('a').sum() # can work for expression
with self.assertRaises(ImportError):
# cannot perform execution
# due to the reason that pyarrow not installed
_ = sess.run(r)
def testFetchSlices(self):
sess = new_session()
arr1 = mt.random.rand(10, 8, chunk_size=3)
r1 = sess.run(arr1)
r2 = sess.fetch(arr1[:2, 3:9])
np.testing.assert_array_equal(r2, r1[:2, 3:9])
r3 = sess.fetch(arr1[0])
np.testing.assert_array_equal(r3, r1[0])
def testFetchDataFrameSlices(self):
sess = new_session()
arr1 = mt.random.rand(10, 8, chunk_size=3)
df1 = md.DataFrame(arr1)
r1 = sess.run(df1)
r2 = sess.fetch(df1.iloc[:, :])
pd.testing.assert_frame_equal(r2, r1.iloc[:, :])
r3 = sess.fetch(df1.iloc[1])
|
pd.testing.assert_series_equal(r3, r1.iloc[1])
|
pandas.testing.assert_series_equal
|
# File: build_game_tables.py
# Date Created: 2018-11-10
# Author(s): <NAME>
# Purpose: Validates retrosheet and 538 wrangling against each other and builds db tables
import pandas as pd
import numpy as np
import datetime
import psycopg2
from ignore import db_cred
df_retro =
|
pd.read_csv('ignore\\large_data\\game_data_retrosheet2\\combined_retrosheet.csv')
|
pandas.read_csv
|
import pandas as pd
#importing all the data from CSV files
master_df = pd.read_csv('People.csv', usecols=['playerID', 'nameFirst', 'nameLast', 'bats', 'throws', 'debut', 'finalGame'])
fielding_df = pd.read_csv('Fielding.csv',usecols=['playerID','yearID','stint','teamID','lgID','POS','G','GS','InnOuts','PO','A','E','DP'])
batting_df = pd.read_csv('Batting.csv')
awards_df = pd.read_csv('AwardsPlayers.csv', usecols=['playerID','awardID','yearID'])
allstar_df = pd.read_csv('AllstarFull.csv', usecols=['playerID','yearID'])
hof_df =
|
pd.read_csv('HallOfFame.csv',usecols=['playerID','yearid','votedBy','needed_note','inducted','category'])
|
pandas.read_csv
|
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import arctern
def test_suite():
from multiprocessing import Process
import time
p1 = Process(target=ST_Intersection)
p2 = Process(target=ST_Equals)
p3 = Process(target=ST_Touches)
p4 = Process(target=ST_Overlaps)
p5 = Process(target=ST_Crosses)
p6 = Process(target=ST_Point)
p7 = Process(target=ST_Contains)
p8 = Process(target=ST_Intersects)
p9 = Process(target=ST_Distance)
p10 = Process(target=ST_DistanceSphere)
p11 = Process(target=ST_HausdorffDistance)
p12 = Process(target=ST_PolygonFromEnvelope)
start = time.time()
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
p9.start()
p10.start()
p11.start()
p12.start()
p1.join()
p2.join()
p3.join()
p4.join()
p5.join()
p6.join()
p7.join()
p8.join()
p9.join()
p10.join()
p11.join()
p12.join()
end = time.time()
print('Task runs %0.2f seconds.' % ((end - start)))
def ST_Intersection():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Intersection(data1, data2)
assert len(rst) == 40000000
def ST_Equals():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Equals(data1, data2)
assert len(rst) == 40000000
def ST_Touches():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Touches(data1, data2)
assert len(rst) == 40000000
def ST_Overlaps():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Overlaps(data1, data2)
assert len(rst) == 40000000
def ST_Crosses():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Crosses(data1, data2)
assert len(rst) == 40000000
def ST_Point():
geo1 = 1.1
geo2 = 2.1
arr1 = [geo1 for x in range(1, 40000001)]
arr2 = [geo2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Point(data1, data2)
assert len(rst) == 40000000
def ST_Contains():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Contains(data1, data2)
assert len(rst) == 40000000
def ST_Intersects():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Intersects(data1, data2)
assert len(rst) == 40000000
def ST_Within():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 =
|
pandas.Series(arr1)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:52:00 2020
@author: <NAME>
Ewp
Takes a little while to run.
This code is pretty average (poorly named variables, reuse of dat, and different names for the moorings).
All of the values are calculated on the fly and printed. Not saved anywhere.
Quite inefficient, but provided as is.
Recommended to turn warnings off to save the data and put it into the text.
Could have turned this into a function. Refactored where possible, but this script is provided as is.
Requirements:
processed/combined_dataset/month_data_exports.nc
processed/flux/pco2grams.nc
Produces:
figs/Figure5a_ENSO_seasonality.png
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
from matplotlib.dates import MonthLocator, DateFormatter
from matplotlib.ticker import FuncFormatter
xl0=0.0
yl0=0.18
xl1=0.0
yl1=0.18
xl2=-0.09
yl2=0
lanina=pd.read_csv('processed/indexes/la_nina_events.csv')
cp_nino=pd.read_csv('processed/indexes/cp_events.csv')
ep_nino=pd.read_csv('processed/indexes/ep_events.csv')
fp='processed/combined_dataset/month_data_exports.nc'
info=xr.open_mfdataset(fp).sel(Mooring=195).to_dataframe()
#Process La Nina, EP and CP Nino events.
nina=pd.DataFrame()
ep=pd.DataFrame()
cp=pd.DataFrame()
for i in lanina.iterrows(): nina=nina.append(info[slice(i[1].start,i[1].end)])
for i in ep_nino.iterrows(): ep=ep.append(info[slice(i[1].start,i[1].end)])
for i in cp_nino.iterrows(): cp=cp.append(info[slice(i[1].start,i[1].end)])
nina_dates=nina.index
ep_dates=ep.index[4:]
cp_dates=cp.index
# %% Load Useful files
seamask=xr.open_dataset('processed/seamask.nc') #Because 2020 version doesn't have it.
seamask= seamask.assign_coords(lon=(seamask.lon % 360)).roll(lon=(seamask.dims['lon']),roll_coords=False).sortby('lon')
#landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI_SOM-FFN_v2018.nc'
landsch_fp='datasets/co2/landschutzer_co2/spco2_MPI-SOM_FFN_v2020.nc'
landschutzer=xr.open_dataset(landsch_fp)
landschutzer= landschutzer.assign_coords(lon=(landschutzer.lon % 360)).roll(lon=(landschutzer.dims['lon']),roll_coords=False).sortby('lon') #EPIC 1 line fix for the dateline problem.
land_pac=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))
land_pac_all=landschutzer.sel(lon=slice(120,290),lat=slice(-20,20))
land_pac=land_pac.fgco2_smoothed
atmco2=land_pac_all.atm_co2
dco2=land_pac_all.dco2
pco2=land_pac_all.spco2_smoothed
kw=land_pac_all.kw
f_ratios=xr.open_mfdataset('processed/flux/fratios.nc')
ratio=f_ratios.laws2011a#laws2000#laws2000,laws2011a,laws2011b,henson2011
npp1=xr.open_dataset('processed/flux/avg_npp_rg_cafe.nc')
avg_npp=(npp1.avg_npp/1000)*ratio
land=moles_to_carbon(land_pac)/365 #LANDSCHUTZ
land['time']=land.time.astype('datetime64[M]')
diff=land-avg_npp
diff1=diff.where((diff<0.1)|(diff<-0.1),np.nan)
sst = xr.open_dataset('datasets/sst/sst.mnmean.nc')
sst= sst.assign_coords(lon=(sst.lon % 360)).roll(lon=(sst.dims['lon']),roll_coords=False).sortby('lon') #EPIC 1 line fix for the dateline problem.
sst=sst.sel(lon=slice(120,290),lat=slice(20,-20)).sst
sst=sst.where(seamask.seamask==1)
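# The "EPIC 1 line fix for the dateline problem" is applied three times above (seamask,
# landschutzer, sst). Illustrative helper capturing that idiom for an xarray Dataset;
# a sketch only, not wired into the code below.
def _wrap_lon_0_360(ds):
    ds = ds.assign_coords(lon=(ds.lon % 360))
    return ds.roll(lon=ds.dims['lon'], roll_coords=False).sortby('lon')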
#startday=np.datetime64('2000-01-01')
#endday=np.datetime64('2019-12-01')
#wu=xr.open_dataset('datasets/uwnd.mon.mean.nc').sel(level=1000,lat=0,lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd
#wv=xr.open_dataset('datasets/vwnd.mon.mean.nc').sel(level=1000,lat=0,lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd
wu=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).uwnd
wv=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).vwnd
ws_ncep2=np.sqrt((wu**2)+(wv**2))
#Check line 164 depending on whether using NCEP2 or windspeed
ws=xr.open_dataarray('datasets/CCMP_windspeed.nc')
#wind=uw.sel(lat=)
precip= xr.open_dataset('datasets/precip.mon.mean.enhanced.nc').sel(lat=slice(20,-20),lon=slice(120,290),time=slice('1997-07-01','2020-01-01')).precip
newprod=avg_npp.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
co2=land.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
pco2=pco2.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
pco2['time']=pco2.time.astype('datetime64[M]')
kw['time']=kw.time.astype('datetime64[M]')
dco2['time']=dco2.time.astype('datetime64[M]')
#pco2=pco2_intrp
kw1=kw.sel(lat=slice(-15,15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
sst=sst.sel(lat=slice(15,-15))#.interpolate_na(dim='time').sel(time=slice(startday,endday))
chl=xr.open_dataset('processed/flux/tpca.nc').tpca#'sw_month.nc')
chl['time']=chl.time.astype('datetime64[M]')
# %%
#Test windspeed
minlat=-2
maxlat=2
#enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col='Year')
enso=pd.read_csv('datasets/indexes/meiv2.csv',index_col=0,header=None)
enso=enso.iloc[3:] #Just so Both EMI and MEI start in 1981-01-01
enso_flat=enso.stack()
enso_dates=pd.date_range('1982','2020-07-01',freq='M')- pd.offsets.MonthBegin(1) #Probably want to check this is correct if updating.
enso_timeseries=pd.DataFrame({'Date':enso_dates,'mei':enso_flat})
enso_timeseries=enso_timeseries.where((enso_timeseries.Date>np.datetime64('1997-01-01'))&(enso_timeseries.Date<np.datetime64('2000-01-01'))).dropna()
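# Illustrative sketch (not used below) of the wide-to-long reshape applied to the MEI
# table above, on a toy year-by-month frame; the column labels here are made up.
def _toy_mei_stack_example():
    wide = pd.DataFrame([[0.1, 0.2], [0.3, 0.4]], index=[1982, 1983], columns=['Jan', 'Feb'])
    flat = wide.stack()  # MultiIndex (year, month) -> one value per month
    dates = pd.date_range('1982-01-01', periods=len(flat), freq='MS')
    return pd.DataFrame({'Date': dates, 'mei': flat.values})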
plt.figure(figsize=(30,10))
plt.subplot(151)
wu1=xr.open_dataset('datasets/uwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290)).uwnd
d=wu1.sel(lat=slice(maxlat,minlat),time=slice(np.datetime64('1997-01-01'),np.datetime64('2000-01-01'))).mean(dim='lat')
d.plot.contourf(vmin=-6,vmax=6)
gc=plt.gca()
gc.invert_yaxis()
positions = (140, 160, 180,200,220,240,260)
labels = ("140$^\circ$E", "160$^\circ$E", "180$^\circ$",'160$^\circ$W','140$^\circ$W','120$^\circ$W','100$^\circ$W')
plt.xticks(positions, labels)
plt.title('zonal (uwnd) speed')
plt.xlim(150,265)
plt.subplot(152)
wv1=xr.open_dataset('datasets/vwnd.10m.mon.mean.nc').sel(level=10,lat=slice(20,-20),lon=slice(120,290)).vwnd
d=wv1.sel(lat=slice(maxlat,minlat),time=slice(np.datetime64('1997-01-01'),np.datetime64('2000-01-01'))).mean(dim='lat')
d.plot.contourf(vmin=-6,vmax=6)
gc=plt.gca()
gc.invert_yaxis()
positions = (140, 160, 180,200,220,240,260)
labels = ("140$^\circ$E", "160$^\circ$E", "180$^\circ$",'160$^\circ$W','140$^\circ$W','120$^\circ$W','100$^\circ$W')
plt.xticks(positions, labels)
plt.xlim(150,265)
plt.title('meridional (vwnd) speed')
plt.subplot(153)
nws=ws.sel(time=slice(np.datetime64('1997-01-01'),np.datetime64('2000-01-01')),lat=slice(minlat,maxlat)).mean(dim='lat')
nws.plot.contourf()
gc=plt.gca()
gc.invert_yaxis()
positions = (140, 160, 180,200,220,240,260)
labels = ("140$^\circ$E", "160$^\circ$E", "180$^\circ$",'160$^\circ$W','140$^\circ$W','120$^\circ$W','100$^\circ$W')
plt.xticks(positions, labels)
plt.xlim(150,265)
plt.title('Wind Speed (m/s)')
plt.subplot(154)
nws=ws.sel(time=slice(np.datetime64('1997-01-01'),np.datetime64('2000-01-01')),lat=slice(minlat,maxlat)).mean(dim='lat')
(nws-nws.mean(dim='time')).plot.contourf(cmap='bwr')
gc=plt.gca()
gc.invert_yaxis()
positions = (140, 160, 180,200,220,240,260)
labels = ("140$^\circ$E", "160$^\circ$E", "180$^\circ$",'160$^\circ$W','140$^\circ$W','120$^\circ$W','100$^\circ$W')
plt.xticks(positions, labels)
plt.title('Wind Speed Anomaly (m/s, wind-wind mean)')
plt.xlim(150,265)
plt.subplot(155)
plt.title('MEI')
plt.plot(enso_timeseries.mei,enso_timeseries.Date,linewidth=5,c='k')
gc=plt.gca()
gc.invert_yaxis()
plt.tight_layout()
# %% Plot them
latbnd=1
co2_eq=co2.sel(lat=slice(-latbnd,latbnd)).mean(dim='lat')
co2_ep=co2_eq.sel(time=ep_dates).mean(dim='time')
co2_cp=co2_eq.sel(time=cp_dates).mean(dim='time')
co2_nina=co2_eq.sel(time=nina_dates).mean(dim='time')
co2_neutral=co2_eq.drop_sel(time=ep_dates.append(cp_dates).append(nina_dates)).mean(dim='time')
dco2_eq=dco2.sel(lat=slice(-latbnd,latbnd)).mean(dim='lat')
dco2_ep=dco2_eq.sel(time=ep_dates).mean(dim='time')
dco2_cp=dco2_eq.sel(time=cp_dates).mean(dim='time')
dco2_nina=dco2_eq.sel(time=nina_dates).mean(dim='time')
dco2_neutral=dco2_eq.drop_sel(time=ep_dates.append(cp_dates).append(nina_dates)).mean(dim='time')
np_eq=newprod.sel(lat=slice(-latbnd,latbnd)).mean(dim='lat')
np_ep=np_eq.sel(time=ep_dates).mean(dim='time')
np_cp=np_eq.sel(time=cp_dates).mean(dim='time')
np_nina=np_eq.sel(time=nina_dates).mean(dim='time')
np_neutral=np_eq.drop_sel(time=ep_dates.append(cp_dates).append(nina_dates)).mean(dim='time')
sst_eq=sst.sel(lat=slice(latbnd,-latbnd)).mean(dim='lat')
sst_ep=sst_eq.sel(time=ep_dates).mean(dim='time')
sst_cp=sst_eq.sel(time=cp_dates).mean(dim='time')
sst_nina=sst_eq.sel(time=nina_dates).mean(dim='time')
sst_neutral=sst_eq.drop_sel(time=ep_dates.append(cp_dates).append(nina_dates)).mean(dim='time')
#Remove the cp[:-7] and change -latbnd,latbnd to latbnd,-latbnd for NCEP2 rather than CCMP
ws_eq=ws.sel(lat=slice(-latbnd,latbnd)).mean(dim='lat')
ws_eq_NCEP2=ws_ncep2.sel(lat=slice(latbnd,-latbnd)).mean(dim='lat')
ws_ep=ws_eq.sel(time=ep_dates).mean(dim='time')
ws_cp=ws_eq.sel(time=cp_dates[:-7]).mean(dim='time')
ws_nina=ws_eq.sel(time=nina_dates).mean(dim='time')
ws_neutral=ws_eq.drop_sel(time=ep_dates.append(cp_dates[:-7]).append(nina_dates)).mean(dim='time')
chl_eq=chl.sel(lat=slice(-latbnd,latbnd)).mean(dim='lat')
chl_ep=chl_eq.sel(time=ep_dates).mean(dim='time')
chl_cp=chl_eq.sel(time=cp_dates[:-5]).mean(dim='time')
chl_nina=chl_eq.sel(time=nina_dates).mean(dim='time')
chl_neutral=chl_eq.drop_sel(time=ep_dates.append(cp_dates[:-5]).append(nina_dates)).mean(dim='time')
prec_eq=precip.sel(lat=slice(latbnd+0.5,-latbnd-0.5)).mean(dim='lat')
prec_ep=prec_eq.sel(time=ep_dates).mean(dim='time')
prec_cp=prec_eq.sel(time=cp_dates).mean(dim='time')
prec_nina=prec_eq.sel(time=nina_dates).mean(dim='time')
prec_neutral=prec_eq.drop_sel(time=ep_dates.append(cp_dates).append(nina_dates)).mean(dim='time')
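# Illustrative sketch (not used below): the repeated "equatorial mean, then composite
# over event dates" pattern above, factored into one helper. Assumes lat is stored
# south-to-north; flip the slice for fields stored north-to-south (e.g. sst, precip).
def composite_eq(da, event_dates, latbnd=1):
    eq = da.sel(lat=slice(-latbnd, latbnd)).mean(dim='lat')
    return eq.sel(time=event_dates).mean(dim='time')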
# %% ENSO FIGURE
#ENSO BREAKDOWN
positions = (140, 160, 180,200,220,240,260)
labels = ("140$^\circ$E", "160$^\circ$E", "180$^\circ$",'160$^\circ$W','140$^\circ$W','120$^\circ$W','100$^\circ$W')
plt.figure(figsize=(10,8))
plt.subplot(4,2,1)
plt.plot(sst_ep.lon,sst_ep,label='East Pacific',c='darkred')
plt.plot(sst_cp.lon,sst_cp,label='Central Pacific',c='darkred',linestyle='--')
plt.plot(sst_nina.lon,sst_nina, label='La Nina',c='blue')
plt.plot(sst_ep.lon,sst_neutral,label='Neutral',c='k')
#plt.title('SST')
plt.xlim([140,265])
plt.grid()
#plt.xlabel('Mooring')
plt.ylabel('Degree C')
plt.ylim([23,31])
plt.title('a) SST',loc='left')
#print('SST')
plt.xticks(positions, labels)
ax = plt.subplot(4,2,3)
plt.plot(chl_ep.lon,chl_ep,label='East Pacific',c='darkred')
plt.plot(chl_cp.lon,chl_cp,label='Central Pacific',c='darkred',linestyle='--')
plt.plot(chl_nina.lon,chl_nina, label='La Nina',c='blue')
plt.plot(chl_ep.lon,chl_neutral,label='Neutral',c='k')
#plt.title('TPCA Chlorophyll')
plt.xlim([140,265])
plt.ylim([0.05,0.35])
plt.ylabel('mg Chl m$^{-3}$ day$^{-1}$')
plt.title('c) TPCA Chlorophyll',loc='left')
plt.grid()
plt.xticks(positions, labels)
ax = plt.subplot(4,2,4)
plt.plot(np_ep.lon,np_ep,label='New Production East Pacific',c='darkred')
plt.plot(np_cp.lon,np_cp,label='New Production Central Pacific',c='darkred',linestyle='--')
plt.plot(np_nina.lon,np_nina, label='New Production Nina',c='blue')
plt.plot(np_ep.lon,np_neutral,label='New Production Neutral',c='k')
plt.xlim([140,265])
plt.xticks(positions, labels)
plt.grid()
plt.ylim([xl0,yl0])
print('NEW PRODUCTION')
plt.ylabel('gC m$^{-2}$ day$^{-1}$')
plt.title('d) New production',loc='left')
#ENSO BREAKDOWN
plt.subplot(426)
plt.plot(dco2_ep.lon,dco2_ep,label='East Pacific',c='darkred')
plt.plot(dco2_cp.lon,dco2_cp,label='Central Pacific',c='darkred',linestyle='--')
plt.plot(dco2_nina.lon,dco2_nina, label='La Nina',c='blue')
plt.plot(dco2_ep.lon,dco2_neutral,label='Neutral',c='k')
plt.xlim([140,265])
plt.xticks(positions, labels)
plt.grid()
plt.ylabel('μatm')
plt.ylim([0,110])
plt.title('f) \u0394pCO$_{2}$',loc='left')
print('pCO2')
#ENSO BREAKDOWN
plt.subplot(4,2,2)
plt.plot(ws_ep.lon,ws_ep,label='East Pacific',c='darkred')
plt.plot(ws_cp.lon,ws_cp,label='Central Pacific',c='darkred',linestyle='--')
plt.plot(ws_nina.lon,ws_nina, label='La Nina',c='blue')
plt.plot(ws_ep.lon,ws_neutral,label='Neutral',c='k')
plt.xticks(positions, labels)
plt.grid()
plt.ylim([2,7])
plt.xlim([140,265])
plt.ylabel('m s$^{-1}$')
plt.title('b) Wind speed',loc='left')
print('WINDSPEED')
# Calculate and Put mooring data on just for reference
means=pd.DataFrame()
final_mooring_enso=pd.DataFrame()
final_mooring_enso_avgs=pd.DataFrame()
ty='month' #Actually month, though this needs to be fixed.
fp='processed/combined_dataset/month_data_exports.nc'
moorings=['110W','125W','140W','155W','170W','165E']#[::-1]
lns=[165,190,205,220,235,250][::-1]
moors=[110, 125, 140, 155, 170, 195]
for i, mooring_name in enumerate(lns):
fp='processed/combined_dataset/month_data_exports.nc'
try:
dat=xr.open_mfdataset(fp).sel(Mooring=int(moors[i]))
except:
dat=xr.open_mfdataset(fp).sel(Mooring=195)
#plt.subplot(6,1,i+1)
#pco2_m=pco2_month.sel(lat=0,lon=mooring_name,method='nearest')
d=dat#.sel(Date=slice(pco2_month.time.min().values,pco2_month.time.max().values))
#pco2_m=d.windspeed#seasonaltrend_sstobs[i]
#mei=d.mei
#We want to create a table of NINO, NINA, neutral and all CO2 and NP averages for each mooring
info=dat.to_dataframe()
info['select_model']=info.laws2011a*info.cafe/1000 #Was cbpmmean
info['co2']=info.co2flux4_land_gmyr/365
info=info[~np.isnan(info.co2)]
nino1=info[info.index.isin(ep_dates)]#info[info.mei>0.5]#.mean()
modoki=info[info.index.isin(cp_dates)]#info[info.emi>0.5]#.mean()
#nmodoki=info[info.emi<-0.5]
nina1=info[info.index.isin(nina_dates)]#info[info.mei<-0.5]#.mean()
neutral1=info[~info.index.isin(cp_dates)]#info[(info.mei<0.5)&(info.mei>-0.5)]#.mean()
neutral1=neutral1[~neutral1.index.isin(ep_dates)]
neutral1=neutral1[~neutral1.index.isin(nina_dates)]
EP=nino1.mean() #Remove if it is in MODOKI
modoki=modoki.mean()
#nmodoki=nmodoki.mean()
nina1=nina1.mean()
neutral1=neutral1.mean()
nino1=nino1.mean()
#Calculate Windspeed biases vs insitu?
# # plt.show()
# # plt.plot(info.windspeed.index,info.windspeed)
# # ws_eq.sel(lon=mooring_name,method='nearest').plot()
# # ws_eq_NCEP2.sel(lon=mooring_name,method='nearest').plot()
# wws_ccmp=ws_eq.sel(lon=mooring_name,method='nearest')-info.windspeed[:-8]
# wws_ncep2=ws_eq_NCEP2.sel(lon=mooring_name,method='nearest').sel(time=slice('1997-01-01','2019-12-01'))-info.windspeed[6:]
# wws_ccmp.plot(),wws_ncep2.plot()
# plt.show()
#pd.Serie
wsdf ={#'El Nino':nino1.windspeed,
'La Nina':nina1.windspeed,
# 'Cold Modoki':nmodoki.windspeed,
'Neutral':neutral1.windspeed,
'CP El Nino':modoki.windspeed,
'EP El Nino':EP.windspeed
# 'All time':info.co2.mean()-info.select_model.mean()
}
ensoavgs=pd.Series(wsdf,name=moorings[i][0:3]+'\xb0'+moorings[i][3:4])
final_mooring_enso=final_mooring_enso.append(ensoavgs)
for x in final_mooring_enso.T.iterrows():
# if 'All time' in x[0]:
# c='k'
print(x[0])
print(x[0]=='CP El Nino')
ls='-'
markr='o'
markers=5
if 'Neutral' in x[0]:
c='black'
#elif 'Nino' in x[0]:
# c='darkred'
elif 'Nina' in x[0]:
c='blue'
markers=7
elif 'Cold Modoki' in x[0]:
c='royalblue'
ls='--'
elif 'EP' in x[0]:
c='darkred'
elif 'CP' in x[0]:
c='red'
ls='--'
markers=8
markr='x'
plt.plot(lns,x[1],c=c,marker=markr,label=x[0],linewidth=0,alpha=0.9,markersize=markers)
ax = plt.subplot(4,2,5)
plt.plot(prec_ep.lon,prec_ep,label='East Pacific',c='darkred')
plt.plot(prec_cp.lon,prec_cp,label='Central Pacific',c='darkred',linestyle='--')
plt.plot(prec_nina.lon,prec_nina, label='La Nina',c='blue')
plt.plot(prec_neutral.lon,prec_neutral,label='Neutral',c='k')
plt.xlim([140,265])
plt.grid()
plt.ylabel('mm day$^{-1}$')
plt.title('e) Precipitation',loc='left')
plt.xticks(positions, labels)
#plt.ylim([xl0,yl0])
means=
|
pd.DataFrame()
|
pandas.DataFrame
|
import datetime
import logging
import os
import pandas as pd
import statsmodels.api as stats
from pandas.tseries.offsets import BDay
from data_fetcher import get_bitcoin_prices, get_altcoin_prices_from_poloniex
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
class CryptoAnalyzerBase(object):
DATA_FOLDER = 'crypto_data'
BTCUSD = 'btcusd'
def __init__(self, ticker, hist_start_date=None, refresh=False):
self.ticker = ticker
self.refresh = refresh
self.altcoin_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pyrtlsdr.pyrtlsdr_wrapper as sdr_wrapper
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import matplotlib.mlab
c_sample_rate = 2e6
c_gain = 100
c_number_of_samples = 128*1024 # TODO: Check - decrease input samples for quicker scan
c_start_freq = 91.0e6
c_scanner_delta = int(c_sample_rate / 2)
c_scanner_steps = 9
c_normalize_scan = True
c_time_steps = 100
c_warmup_steps = 3
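# With these settings the scan centre steps from c_start_freq in c_scanner_delta (1 MHz)
# increments, i.e. centres at 91-99 MHz over the 9 steps; each 2 MHz-wide window overlaps
# its neighbour by half, so roughly 90-100 MHz is covered in total.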
sdr_client = sdr_wrapper.PyRtlSdrWrapper(
sample_rate=c_sample_rate,
gain=c_gain
)
df_ret = pd.DataFrame()
for _ in range(c_warmup_steps):
print("(Warmup Scan)")
_ = sdr_client.scan_freq(
center_freq=c_start_freq,
number_of_samples=c_number_of_samples
)
for i_time_step in range(c_time_steps):
print()
print("Scanning...", i_time_step+1, "from", c_time_steps)
for i_scan_step in range(c_scanner_steps):
target_freq = c_start_freq + i_scan_step * c_scanner_delta
samples = sdr_client.scan_freq(
center_freq=target_freq,
number_of_samples=c_number_of_samples
)
pxx, frequencies = matplotlib.mlab.psd(samples, NFFT=64, Fs=c_sample_rate/1e6)
pxx = 10.0*np.log10(pxx)
frequencies += target_freq/1e6
if c_normalize_scan:
# pxx/=np.std(pxx)
pxx -= np.mean(pxx)
# Modify for plot
pxx = (pxx/10)-100
t = float(i_time_step + (i_scan_step / c_scanner_steps))
df_t =
|
pd.DataFrame({"freq": frequencies, "pxx": pxx, "t": t})
|
pandas.DataFrame
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 +
|
DateOffset(hours=1)
|
pandas.DateOffset
|
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties)
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# pd.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
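# worked example of the convention (decoding an existing name, not adding a new input):
# 'dbt_bird_1inten_mort' = dose based toxicity, birds, dose associated with 10% mortality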
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort =
|
pd.Series([], dtype="float", name="arbt_mamm_mort")
|
pandas.Series
|
"""
Access to IFCB raw data files, including directory operations.
"""
import os
from functools import lru_cache
import pandas as pd
from .identifiers import Pid
from .adc import AdcFile, AdcFragment
from .hdr import parse_hdr_file
from .roi import RoiFile
from .utils import BaseDictlike, CaseInsensitiveDict
from .bins import BaseBin
DEFAULT_BLACKLIST = ['skip','beads']
DEFAULT_WHITELIST = ['data']
class Fileset(object):
"""
Represents a set of three raw data files
"""
def __init__(self, basepath):
"""
:param basepath: the base path of the files (no extension)
"""
self.basepath = basepath
@property
def adc_path(self):
"""
The path of the ``.adc`` file.
"""
return self.basepath + '.adc'
@property
def hdr_path(self):
"""
The path of the ``.hdr`` file.
"""
return self.basepath + '.hdr'
@property
def roi_path(self):
"""
The path of the ``.roi`` file.
"""
return self.basepath + '.roi'
@property
@lru_cache()
def pid(self):
"""
A ``Pid`` representing the bin PID
"""
return Pid(os.path.basename(self.basepath))
@property
def lid(self):
"""
The bin's LID
"""
return self.pid.bin_lid
def exists(self):
"""
Checks for existence of all three raw data files.
:returns bool: whether or not all files exist
"""
if not os.path.exists(self.adc_path):
return False
if not os.path.exists(self.hdr_path):
return False
if not os.path.exists(self.roi_path):
return False
return True
# metrics
def getsizes(self):
"""
Get the sizes of the files.
:returns dict: sizes of files with keys
'hdr', 'adc', and 'roi'
"""
hdr_size = os.path.getsize(self.hdr_path)
adc_size = os.path.getsize(self.adc_path)
roi_size = os.path.getsize(self.roi_path)
return {
'hdr': hdr_size,
'adc': adc_size,
'roi': roi_size
}
def getsize(self):
"""
Get the total size of all three files.
:returns int: the total size of all three files
"""
return sum(self.getsizes().values())
def as_bin(self):
"""
:returns: a Bin view of this fileset.
"""
return FilesetBin(self)
def __repr__(self):
return '<IFCB Fileset %s>' % self.basepath
def __str__(self):
return self.basepath
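# Illustrative usage sketch; the basepath below is hypothetical, not taken from this repo:
# >>> fs = Fileset('/data/D20150101T000000_IFCB001')
# >>> fs.exists()      # True only if the .adc/.hdr/.roi triplet is present
# >>> fs.getsizes()    # {'hdr': ..., 'adc': ..., 'roi': ...}
# >>> with fs.as_bin() as b:   # FilesetBin context manager opens/closes the .roi file
# ...     print(b.pid)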
# bin interface to Fileset
class FilesetBin(BaseBin):
"""
Bin interface to Fileset.
Context manager support opens and closes the ``.roi`` file for image
access.
"""
def __init__(self, fileset):
"""
:param fileset: the ``Fileset`` to represent
"""
self.fileset = fileset
self.adc_file = AdcFile(fileset.adc_path)
self.roi_file = RoiFile(self.adc_file, fileset.roi_path)
# oo interface to fileset
@property
@lru_cache()
def hdr_attributes(self):
"""
A ``dict`` representing the headers
"""
return parse_hdr_file(self.fileset.hdr_path)
@property
def timestamp(self):
"""
The bin's timestamp (as a ``datetime``)
"""
return self.pid.timestamp
def to_hdf(self, hdf_file, group=None, replace=True, archive=False):
"""
Convert the fileset to HDF.
:param hdf_file: the root HDF file pathname or
object (``h5py.File`` or ``h5py.Group``) in which to write all raw data
:param group: a path below the root to use
as the destination sub-group
:param replace: whether to replace any existing data
at that location in the HDF file
:param archive: whether to include the full text of the .hdr
and .roi files
"""
from .hdf import filesetbin2hdf
filesetbin2hdf(self, hdf_file, group=group, replace=replace, archive=archive)
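# Illustrative usage sketch; the HDF filename and group name are hypothetical:
# >>> with FilesetBin(fs) as b:
# ...     b.to_hdf('raw_bins.h5', group='D20150101T000000_IFCB001', archive=True)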
# bin interface
@property
def pid(self):
"""
The bin's PID
"""
return self.fileset.pid
@property
def schema(self):
"""
The bin's schema
"""
return self.adc_file.schema
@property
def images(self):
"""
The images
"""
return self.roi_file
@property
def headers(self):
"""
The header dict
"""
return self.hdr_attributes
def header(self, key):
ci_dict = CaseInsensitiveDict(self.hdr_attributes)
return ci_dict[key]
@property
def adc(self):
"""
The bin's ADC data as a ``pandas.DataFrame``
"""
return self.adc_file.csv
# context manager implementation
def isopen(self):
"""
Is the ``.roi`` file open?
"""
return self.roi_file.isopen()
def close(self):
"""
Close the ``.roi`` file, if it is open.
"""
if self.isopen():
self.roi_file.close()
def __enter__(self):
if not self.isopen():
self.roi_file._open()
return self
def __exit__(self, *args):
self.close()
# support for single image reading
def as_single(self, target):
"""Return a new FilesetBin that only provides access to
a single target. If called immediately upon construction
(before accessing any data) this will avoid parsing the
entire ADC file. Otherwise it will raise ValueError."""
if self.isopen():
raise ValueError('as_single must be called before opening FilesetBin')
return FilesetFragmentBin(self.fileset, target)
def __repr__(self):
return '<FilesetBin %s>' % self
def __str__(self):
return self.fileset.__str__()
# special fileset bin subclass for reading one image fast
class FilesetFragmentBin(FilesetBin):
def __init__(self, fileset, target):
self.fileset = fileset
self.adc_file = AdcFragment(fileset.adc_path, target, target+2)
self.roi_file = RoiFile(self.adc_file, fileset.roi_path)
# listing and finding raw filesets and associated bin objects
def validate_path(filepath, blacklist=DEFAULT_BLACKLIST, whitelist=DEFAULT_WHITELIST):
"""
Validate an IFCB raw data file path.
A well-formed raw data file path relative to some root
only contains path components that are
not blacklisted and either
whitelisted or part of the file's basename (without
extension).
:param filepath: the pathname of the file
:param blacklist: directory names to ignore
:param whitelist: directory names to include, even if they
do not match the path's basename
:returns bool: if the pathname is valid
"""
if not set(blacklist).isdisjoint(set(whitelist)):
raise ValueError('whitelist and blacklist must be disjoint')
dirname, basename = os.path.split(filepath)
lid, ext = os.path.splitext(basename)
components = dirname.split(os.sep)
for c in components:
if c in blacklist:
return False
if c not in whitelist and c not in lid:
return False
return True
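# behavior sketch under the default white/blacklists (the paths are hypothetical):
# >>> validate_path('data/D20150101T000000_IFCB001.adc')    # 'data' is whitelisted -> True
# >>> validate_path('beads/D20150101T000000_IFCB001.adc')   # 'beads' is blacklisted -> False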
def list_filesets(dirpath, blacklist=DEFAULT_BLACKLIST, whitelist=DEFAULT_WHITELIST, sort=True, validate=True):
"""
Iterate over entire directory tree and yield a Fileset
object for each .adc/.hdr/.roi fileset found. Warning: for
large directories, this is slow.
:param blacklist: list of directory names to ignore
:param whitelist: list of directory names to include, even if they
do not match a file's basename
:param sort: whether to sort output (sorts by alpha)
:param validate: whether to validate each path
"""
if not set(blacklist).isdisjoint(set(whitelist)):
raise ValueError('whitelist and blacklist must be disjoint')
for dp, dirnames, filenames in os.walk(dirpath):
for d in dirnames:
if d in blacklist:
dirnames.remove(d)
if sort:
dirnames.sort(reverse=True)
filenames.sort(reverse=True)
for f in filenames:
basename, extension = f[:-4], f[-3:]
if extension == 'adc' and basename+'.hdr' in filenames and basename+'.roi' in filenames:
if validate:
reldir = dp[len(dirpath)+1:]
if not validate_path(os.path.join(reldir,basename), whitelist=whitelist, blacklist=blacklist):
continue
yield dp, basename
def list_data_dirs(dirpath, blacklist=DEFAULT_BLACKLIST, sort=True, prune=True):
"""
Yield the paths of any descendant directories that contain at least
one ``.adc`` file.
:param blacklist: list of directory names to ignore
:param sort: whether to sort output (sorts by alpha)
:param prune: whether, given a dir with an ``.adc`` file in it, to skip
subdirectories
"""
dirlist = os.listdir(dirpath)
if sort:
dirlist.sort()
for name in dirlist:
if name[-3:] == 'adc':
yield dirpath
if prune:
return
for name in dirlist:
if name not in blacklist:
child = os.path.join(dirpath,name)
if os.path.isdir(child):
yield from list_data_dirs(child, sort=sort, prune=prune)
def find_fileset(dirpath, lid, whitelist=['data'], blacklist=['skip','beads']):
"""
Find a fileset anywhere below the given directory path
given the bin's lid. This assumes that the file's path
is valid.
:returns Fileset: the ``Fileset``, or ``None`` if it is not found.
"""
dirlist = os.listdir(dirpath)
for name in dirlist:
if name == lid + '.adc':
basepath = os.path.join(dirpath,lid)
return Fileset(basepath)
elif name in whitelist or name in lid:
# is the name whitelisted, or a substring of the lid?
fs = find_fileset(os.path.join(dirpath,name), lid, whitelist=whitelist, blacklist=blacklist)
if fs is not None:
return fs
# not found
return None
class DataDirectory(object):
"""
Represents a directory containing IFCB raw data.
Provides a dict-like interface allowing access to FilesetBins by LID.
"""
def __init__(self, path='.', whitelist=DEFAULT_WHITELIST, blacklist=DEFAULT_BLACKLIST, filter=lambda x: True):
"""
:param path: the path of the data directory
:param whitelist: a list of directory names to allow
:param blacklist: a list of directory names to disallow
"""
self.path = path
self.whitelist = whitelist
self.blacklist = blacklist
self.filter = filter
def list_filesets(self):
"""
Yield all filesets.
"""
for dirpath, basename in list_filesets(self.path, whitelist=self.whitelist, blacklist=self.blacklist):
basepath = os.path.join(dirpath, basename)
fs = Fileset(basepath)
if self.filter(fs):
yield fs
def find_fileset(self, lid):
"""
Locate a fileset by LID. Returns None if it is not found.
:param lid: the LID to search for
:type lid: str
:returns Fileset: the fileset, or None if not found
"""
fs = find_fileset(self.path, lid, whitelist=self.whitelist, blacklist=self.blacklist)
if fs is None:
return None
elif self.filter(fs):
return fs
def __iter__(self):
# yield from list_filesets called with no keyword args
for fs in self.list_filesets():
yield FilesetBin(fs)
def has_key(self, lid):
# fast contains method that avoids iteration
return self.find_fileset(lid) is not None
def __getitem__(self, lid):
fs = self.find_fileset(lid)
if fs is None:
raise KeyError('No fileset for %s found at or under %s' % (lid, self.path))
return FilesetBin(fs)
def __len__(self):
"""warning: for large datasets, this is very slow"""
return sum(1 for _ in self)
# subdirectories
def list_descendants(self, **kw):
"""
Find all 'leaf' data directories and yield ``DataDirectory``
objects for each one. Note that this enforces blacklisting
but not whitelisting (no fileset path validation is done).
Accepts ``list_data_dirs`` keywords, except ``blacklist`` which
takes on the value given in the constructor.
"""
for dd in list_data_dirs(self.path, blacklist=self.blacklist, **kw):
yield DataDirectory(dd)
def __repr__(self):
return '<DataDirectory %s>' % self.path
def __str__(self):
return self.path
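# Illustrative usage sketch; the directory and LID are hypothetical:
# >>> dd = DataDirectory('/mnt/ifcb_data')
# >>> b = dd['D20150101T000000_IFCB001']   # raises KeyError if no such fileset
# >>> for b in dd:                          # yields FilesetBin objects; slow on large trees
# ...     print(b.pid)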
# filters for DataDirectory
def time_filter(start='1970-01-01', end='3000-01-01'):
start = pd.to_datetime(start, utc=True)
end =
|
pd.to_datetime(end, utc=True)
|
pandas.to_datetime
|
import io
import itertools
import pytest
from pandas.util.testing import (
assert_series_equal, assert_frame_equal, assert_index_equal)
from numpy.testing import assert_array_equal
import pandas as pd
import numpy as np
import matplotlib.figure
import matplotlib.pyplot as plt
from upsetplot import plot
from upsetplot import UpSet
from upsetplot import generate_counts, generate_samples
from upsetplot.plotting import _process_data
# TODO: warnings should raise errors
def is_ascending(seq):
# return np.all(np.diff(seq) >= 0)
return sorted(seq) == list(seq)
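# e.g. is_ascending([1, 2, 2, 3]) -> True, is_ascending([3, 1, 2]) -> False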
@pytest.mark.parametrize('x', [
generate_counts(),
generate_counts().iloc[1:-2],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_series(x, sort_by, sort_categories_by):
assert x.name == 'value'
for subset_size in ['auto', 'legacy', 'sum', 'count']:
for sum_over in ['abc', False]:
with pytest.raises(ValueError, match='sum_over is not applicable'):
_process_data(x, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size=subset_size, sum_over=sum_over)
df, intersections, totals = _process_data(
x, subset_size='auto', sort_by=sort_by,
sort_categories_by=sort_categories_by, sum_over=None)
assert intersections.name == 'value'
x_reordered = (x
.reorder_levels(intersections.index.names)
.reindex(index=intersections.index))
assert len(x) == len(x_reordered)
assert x_reordered.index.is_unique
assert_series_equal(x_reordered, intersections,
check_dtype=False)
if sort_by == 'cardinality':
assert is_ascending(intersections.values[::-1])
else:
# check degree order
assert is_ascending(intersections.index.to_frame().sum(axis=1))
# TODO: within a same-degree group, the tuple of active names should
# be in sort-order
if sort_categories_by:
assert is_ascending(totals.values[::-1])
assert np.all(totals.index.values == intersections.index.names)
assert np.all(df.index.names == intersections.index.names)
assert set(df.columns) == {'_value', '_bin'}
assert_index_equal(df['_value'].reorder_levels(x.index.names).index,
x.index)
assert_array_equal(df['_value'], x)
assert_index_equal(intersections.iloc[df['_bin']].index,
df.index)
assert len(df) == len(x)
@pytest.mark.parametrize('x', [
generate_samples()['value'],
generate_counts(),
])
def test_subset_size_series(x):
kw = {'sort_by': 'cardinality',
'sort_categories_by': 'cardinality',
'sum_over': None}
df_sum, intersections_sum, totals_sum = _process_data(
x, subset_size='sum', **kw)
if x.index.is_unique:
expected_warning = None
else:
expected_warning = FutureWarning
with pytest.warns(expected_warning):
df, intersections, totals = _process_data(
x, subset_size='legacy', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
if x.index.is_unique:
df, intersections, totals = _process_data(
x, subset_size='auto', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
else:
with pytest.raises(ValueError):
_process_data(
x, subset_size='auto', **kw)
df_count, intersections_count, totals_count = _process_data(
x, subset_size='count', **kw)
df, intersections, totals = _process_data(
x.groupby(level=list(range(len(x.index.levels)))).count(),
subset_size='sum', **kw)
assert_series_equal(intersections, intersections_count, check_names=False)
assert_series_equal(totals, totals_count)
@pytest.mark.parametrize('sort_sets_by', [None, 'cardinality'])
@pytest.mark.parametrize('x', [
generate_counts(),
])
def test_sort_sets_by_deprecation(x, sort_sets_by):
with pytest.warns(DeprecationWarning, match='sort_sets_by'):
upset1 = UpSet(x, sort_sets_by=sort_sets_by)
with pytest.warns(None):
upset2 = UpSet(x, sort_categories_by=sort_sets_by)
fig = matplotlib.figure.Figure()
upset1.plot(fig)
png1 = io.BytesIO()
fig.savefig(png1, format='raw')
fig = matplotlib.figure.Figure()
upset2.plot(fig)
png2 = io.BytesIO()
fig.savefig(png2, format='raw')
assert png1.getvalue() == png2.getvalue()
@pytest.mark.parametrize('x', [
generate_samples()['value'],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_frame(x, sort_by, sort_categories_by):
X = pd.DataFrame({'a': x})
with pytest.raises(ValueError, match='Please specify subset_size'):
_process_data(X, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size='legacy', sum_over=None)
with pytest.warns(None):
df, intersections, totals = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='a', subset_size='auto')
assert df is not X
# check equivalence to Series
df1, intersections1, totals1 = _process_data(
x, sort_by=sort_by, sort_categories_by=sort_categories_by,
subset_size='sum', sum_over=None)
assert intersections.name == 'a'
assert_frame_equal(df, df1.rename(columns={'_value': 'a'}))
assert_series_equal(intersections, intersections1, check_names=False)
assert_series_equal(totals, totals1)
# check effect of extra column
X = pd.DataFrame({'a': x, 'b': np.arange(len(x))})
df2, intersections2, totals2 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='a', subset_size='auto')
assert_series_equal(intersections, intersections2)
assert_series_equal(totals, totals2)
assert_frame_equal(df, df2.drop('b', axis=1))
assert_array_equal(df2['b'], X['b']) # disregard levels, tested above
# check effect not dependent on order/name
X = pd.DataFrame({'b': np.arange(len(x)), 'c': x})
df3, intersections3, totals3 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='c', subset_size='auto')
assert_series_equal(intersections, intersections3, check_names=False)
assert intersections.name == 'a'
assert intersections3.name == 'c'
assert_series_equal(totals, totals3)
assert_frame_equal(df.rename(columns={'a': 'c'}), df3.drop('b', axis=1))
assert_array_equal(df3['b'], X['b'])
# check subset_size='count'
X = pd.DataFrame({'b': np.ones(len(x), dtype=int), 'c': x})
df4, intersections4, totals4 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='b', subset_size='auto')
df5, intersections5, totals5 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
subset_size='count', sum_over=None)
assert_series_equal(intersections4, intersections5, check_names=False)
assert intersections4.name == 'b'
assert intersections5.name == 'size'
assert_series_equal(totals4, totals5)
assert_frame_equal(df4, df5)
@pytest.mark.parametrize('x', [
generate_samples()['value'],
generate_counts(),
])
def test_subset_size_frame(x):
kw = {'sort_by': 'cardinality',
'sort_categories_by': 'cardinality'}
X = pd.DataFrame({'x': x})
df_sum, intersections_sum, totals_sum = _process_data(
X, subset_size='sum', sum_over='x', **kw)
df_count, intersections_count, totals_count = _process_data(
X, subset_size='count', sum_over=None, **kw)
# error cases: sum_over=False
for subset_size in ['auto', 'sum', 'count']:
with pytest.raises(ValueError, match='sum_over'):
_process_data(
X, subset_size=subset_size, sum_over=False, **kw)
with pytest.raises(ValueError, match='sum_over'):
_process_data(
X, subset_size=subset_size, sum_over=False, **kw)
# error cases: sum_over incompatible with subset_size
with pytest.raises(ValueError, match='sum_over should be a field'):
_process_data(
X, subset_size='sum', sum_over=None, **kw)
with pytest.raises(ValueError, match='sum_over cannot be set'):
_process_data(
X, subset_size='count', sum_over='x', **kw)
# check subset_size='auto' or 'legacy' with sum_over=str => sum
for subset_size in ['auto', 'legacy']:
df, intersections, totals = _process_data(
X, subset_size=subset_size, sum_over='x', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
# check subset_size='auto' with sum_over=None => count
df, intersections, totals = _process_data(
X, subset_size='auto', sum_over=None, **kw)
assert_frame_equal(df, df_count)
assert_series_equal(intersections, intersections_count)
assert_series_equal(totals, totals_count)
# check legacy use of sum_over=False
with pytest.warns(DeprecationWarning, match='sum_over=False'):
df, intersections, totals = _process_data(
X, subset_size='legacy', sum_over=False, **kw)
assert_frame_equal(df, df_count)
|
assert_series_equal(intersections, intersections_count)
|
pandas.util.testing.assert_series_equal
|
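The completion above targets pandas' Series-equality assertion. A minimal, self-contained sketch of the same call with invented values (newer pandas exposes it as pandas.testing rather than pandas.util.testing):
import pandas as pd
import pandas.testing as tm  # pandas.util.testing in older releases

# Two Series with identical values and index but different names
left = pd.Series([1, 2, 3], name="a")
right = pd.Series([1, 2, 3], name="size")

# check_names=False ignores the differing .name attributes, as in the test above
tm.assert_series_equal(left, right, check_names=False)
print("equal apart from their names")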
"""
Script to get the distribution of defined percentage of most important ingredients according to their categories.
Needs a working MongoDB server containing the Open Food Facts database.
"""
import pandas as pd
import pymongo
import tqdm
from data import INGREDIENTS_DISTRIBUTION_FILEPATH
# Minimum number of defined percentage per ingredient
MIN_VALUE_NB = 30
df = pd.DataFrame(columns=['id', 'percent', 'categories_tags'])
client = pymongo.MongoClient()
db = client['off']
products = db['products']
query = {"ingredients": {"$elemMatch": {"percent": {"$exists": True}}}}
cursor = products.find(query,
{'ingredients.percent': 1,
'ingredients.id': 1,
'categories_tags': 1},
no_cursor_timeout=True).batch_size(1000)
# Looping on all products and all ingredients, adding the ingredient and its percentage to the dataframe if it is
# defined
for product in tqdm.tqdm(cursor):
for ingredient in product['ingredients']:
if 'percent' not in ingredient:
continue
# Removing erroneous data
if not (0 < float(ingredient["percent"]) <= 100):
continue
df = df.append({"id": ingredient['id'],
"percent": float(ingredient["percent"]),
"categories_tags": product.get('categories_tags')},
ignore_index=True)
# Removing elements with less than the minimum number of values
counts = df.id.value_counts()
df = df[df.id.isin(counts[counts >= MIN_VALUE_NB].index)]
# Sorting the dataframe by id
df.id =
|
pd.Categorical(df.id, counts.index)
|
pandas.Categorical
|
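A short sketch, using made-up ingredient ids, of what the pd.Categorical completion above achieves in the ingredients script: casting the id column to a categorical whose categories follow value_counts order makes a later sort order rows by frequency.
import pandas as pd

df = pd.DataFrame({"id": ["salt", "sugar", "salt", "flour", "salt", "sugar"]})
counts = df["id"].value_counts()  # descending frequency order

# Categories taken from counts.index, so sorting follows frequency, not alphabet
df["id"] = pd.Categorical(df["id"], counts.index)
print(df.sort_values("id"))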
import pandas as pd
from IPython.display import clear_output
from datetime import timedelta
from .conflict import ConflictManager
class TokensManager:
"""
"""
def __init__(self, all_actions, maxwords):
self.maxwords = maxwords
self.all_actions = all_actions
def get_states(self):
sample = self.all_actions.copy().reset_index(drop=True)
# Get differences of columns 'token_id' and 'rev_time'
diff_actions = sample[['token_id', 'rev_time']] - sample.shift(1)[['token_id', 'rev_time']]
diff_actions = diff_actions.rename({'token_id': 'tokenid_diff', 'rev_time': 'time_diff'}, axis=1)
sample[['tokenid_diff', 'time_diff']] = diff_actions
sample.fillna(1.0, inplace=True)
# Boolean for adds action
bool_adds = sample['tokenid_diff'] != 0
sample['bool_adds'] = bool_adds
# Boolean for dels action
bool_dels = sample['action'] == 'out'
sample['bool_dels'] = bool_dels
# Boolean for reins action
sample['bool_reins'] = ~bool_adds
sample[['bool_adds', 'bool_dels', 'bool_reins']] = sample[['bool_adds', 'bool_dels', 'bool_reins']].astype(int)
sample['reins-dels'] = sample['bool_reins'] - sample['bool_dels']
sample = sample.drop('bool_reins', axis=1).rename({'reins-dels':'bool_reins'}, axis=1)
# Boolean for time.
sample['time_diff'] = sample['time_diff'].shift(-1)
adds_index = sample[sample['bool_adds'] == 1].index
last_index = pd.Index(list(adds_index[1:] - 1) + [adds_index[-1]])
sample['bool_last'] = 0
sample.loc[last_index, 'bool_last']=1
sample['bool_survival'] = 0
survival_df = pd.DataFrame(
~(sample[sample['bool_last'] == 0]['time_diff'] < timedelta(2,0,0))).rename({'time_diff':'survival'},axis=1).astype(int)
survival_idx = survival_df[survival_df['survival'] == 1].index
sample.loc[survival_idx, 'bool_survival'] = 1
sample['bool_survive'] = sample['bool_survival'] + sample['bool_last']
sample = sample.drop(['bool_last', 'bool_survival'], axis=1)
return sample
def _action_survival(self, df_with_bools, bool_col):
action = df_with_bools.copy()
action['survive'] = action[bool_col] & action['bool_survive']
action = action[action[bool_col] == True].reset_index(drop=True)
action = action.drop(['tokenid_diff', 'time_diff','bool_adds', 'bool_dels', 'bool_reins', 'bool_survive'], axis=1)
action['survive'] = action['survive'].astype(int)
action.set_index('rev_id', inplace=True)
return action
def token_survive(self):
sample = self.get_states()
# Convert all 0-1 to boolean.
sample[['bool_adds', 'bool_dels', 'bool_reins', 'bool_survive']] = sample[
['bool_adds', 'bool_dels', 'bool_reins', 'bool_survive']].astype(bool)
# Survival states for all actions.
adds_actions = self._action_survival(sample, 'bool_adds')
dels_actions = self._action_survival(sample, 'bool_dels')
reins_actions = self._action_survival(sample, 'bool_reins')
return adds_actions, dels_actions, reins_actions
def _odd_true(self, number):
"""
"""
if type(number) == int:
if number % 2 == 0:
return False
else:
return True
elif len(number) == 1:
if number[0] % 2 == 0:
return False
else:
return True
else:
results = []
for i in number:
if i % 2 == 0:
results.append(False)
else:
results.append(True)
return
|
pd.Series(results)
|
pandas.Series
|
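An illustrative aside, not part of the original class: the element-wise loop that pd.Series(results) wraps in _odd_true can also be written with vectorised pandas operations.
import pandas as pd

numbers = pd.Series([1, 2, 3, 4, 5])

# Odd values map to True, mirroring the per-element loop above
is_odd = (numbers % 2).astype(bool)
print(is_odd.tolist())  # [True, False, True, False, True]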
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
|
tm.assert_series_equal(actual, s2)
|
pandas.util.testing.assert_series_equal
|
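A tiny stand-alone sketch of the timedelta broadcasting exercised just above (setup values copied from the test):
import pandas as pd

s1 = pd.to_timedelta(pd.Series(["00:00:01"]))
scalar1 = pd.to_timedelta("00:00:01")

# A Timedelta scalar broadcasts element-wise over a timedelta64[ns] Series
print(s1 + scalar1)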
# coding: utf-8
'''
Main script meant to run in an EC2 environment.
'''
import random
import pymysql
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
# Import the required project modules
import feature
import dataset
import model
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
print("start")
train_x = dataset.train_result()
# To check the train and test sets
print(train_x.columns)
## Here train_x is passed as a parameter to make_answer
train_y = dataset.make_answer(train_x)
print("train_y ํ์ธ \n")
print(train_y)
## test set
test = dataset.test_result()
# To check the train and test sets
print(test.columns)
### submission form
out_df=test[["order_id", "product_id","user_id"]]
### Convert the final data set --> array
train_x = np.array(train_x.drop(["order_id", "user_id", "product_id","aisle_id","department_id"], axis=1))
test = test.drop(["order_id", "user_id", "product_id","aisle_id","department_id"], axis=1)
#test.to_csv("test.csv", index=False)
test = np.array(test)
# xgboost: training data, labels, and the data to predict with the fitted model -> takes about 20 minutes
pred = model.runXGB(train_x, train_y, test)
print("Prediction fin.")
#### Store the model predictions
temp = pred
## Even for the same model outputs, the labels change depending on the cutoff
cutoff = temp.mean() - 0.02
temp[temp>=cutoff] = 1
temp[temp<cutoff] = 0
# Add the prediction column alongside the order and product numbers
out_df["Pred"] = temp
out_df.head()
# Keep only the rows predicted as reorders!
out_df = out_df.ix[out_df["Pred"].astype('int')==1]
# Remove duplicates
out_df = out_df.drop_duplicates()
### Select only the products with a high probability
def merge_products(x):
return " ".join(list(x.astype('str')))
############### For the Kaggle submission
kaggle = out_df.groupby("order_id")["product_id"].aggregate(merge_products).reset_index()
kaggle.columns = ["order_id", "products"]
SQL = "SELECT order_id FROM submission"
sub_df = pd.read_sql(SQL, db)
sub_df =
|
pd.merge(sub_df, kaggle, how="left", on="order_id")
|
pandas.merge
|
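A hedged, self-contained sketch of the left merge completed above, with invented stand-ins for sub_df and kaggle:
import pandas as pd

sub_df = pd.DataFrame({"order_id": [1, 2, 3]})
kaggle = pd.DataFrame({"order_id": [1, 3], "products": ["10 20", "30"]})

# how="left" keeps every submission order_id; orders with no predicted
# reorders end up with NaN in "products" (usually filled before submitting)
merged = pd.merge(sub_df, kaggle, how="left", on="order_id")
print(merged)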
#!/usr/bin/env python3
from pathlib import Path
import pandas as pd
def main():
current_dir = Path.cwd()
all_final_iters = current_dir.rglob('final_iter.csv')
dfs = []
for csv in all_final_iters:
df =
|
pd.read_csv(csv)
|
pandas.read_csv
|
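The script above breaks off right after pd.read_csv; a plausible continuation, an assumption since the original body is truncated, appends each frame and concatenates them:
from pathlib import Path
import pandas as pd

def collect_final_iters(root: Path) -> pd.DataFrame:
    # Read every final_iter.csv under root and stack the frames
    dfs = [pd.read_csv(csv) for csv in root.rglob("final_iter.csv")]
    return pd.concat(dfs, ignore_index=True) if dfs else pd.DataFrame()

if __name__ == "__main__":
    print(collect_final_iters(Path.cwd()).shape)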