prompt | completion | api
---|---|---
stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90
import itertools
import logging
import math
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.ndimage.filters import uniform_filter1d
import basty.utils.misc as misc
np.seterr(all="ignore")
class SpatioTemporal:
def __init__(self, fps, stft_cfg={}):
self.stft_cfg = deepcopy(stft_cfg)
self.logger = logging.getLogger("main")
assert fps > 0
self.get_delta = lambda x, scale: self.calc_delta(x, scale, fps)
self.get_moving_mean = lambda x, winsize: self.calc_moving_mean(x, winsize, fps)
self.get_moving_std = lambda x, winsize: self.calc_moving_std(x, winsize, fps)
delta_scales_ = [100, 300, 500]
window_sizes_ = [300, 500]
if "delta_scales" not in stft_cfg.keys():
self.logger.info(
"Scale valuess can not be found in configuration for delta features."
+ f"Default values are {str(delta_scales_)[1:-1]}."
)
if "window_sizes" not in stft_cfg.keys():
self.logger.info(
"Window sizes can not be found in configuration for window features."
+ f"Default values are {str(window_sizes_)[1:-1]}."
)
self.stft_cfg["delta_scales"] = stft_cfg.get("delta_scales", delta_scales_)
self.stft_cfg["window_sizes"] = stft_cfg.get("window_sizes", window_sizes_)
self.stft_set = ["pose", "distance", "angle"]
for ft_set in self.stft_set:
ft_set_dt = ft_set + "_delta"
self.stft_cfg[ft_set] = stft_cfg.get(ft_set, [])
self.stft_cfg[ft_set_dt] = stft_cfg.get(ft_set_dt, [])
self.angle_between = self.angle_between_atan
@staticmethod
def angle_between_arccos(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
angle_between((1, 0, 0), (0, 1, 0)) --> 1.5707963267948966
angle_between((1, 0, 0), (1, 0, 0)) --> 0.0
angle_between((1, 0, 0), (-1, 0, 0)) --> 3.141592653589793
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
@staticmethod
def angle_between_atan(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
        angle = math.atan2(np.linalg.det([v1, v2]), np.dot(v1, v2))
return np.abs(angle)
def get_group_value(self, stft_group, opt):
if opt == "avg":
group_value = np.nanmean(stft_group, axis=1)
elif opt == "min":
            group_value = np.nanmin(stft_group, axis=1)
elif opt == "max":
group_value = np.nanmax(stft_group, axis=1)
else:
raise ValueError(f"Unkown option {opt} is given for feature group.")
return group_value
@staticmethod
def calc_delta(x, scale, fps):
# In terms of millisecond.
delta_values = []
scale_frame = math.ceil(fps * (1000 / scale))
y = uniform_filter1d(x, size=scale_frame, axis=0)
delta_y = np.abs(np.gradient(y, 1 / fps * 1000, axis=0, edge_order=2))
delta_values.append(delta_y)
return delta_values
@staticmethod
def calc_moving_mean(x, winsize, fps):
mean_values = []
w_frame = math.ceil(fps * (winsize / 1000))
mean_values.append(x.rolling(w_frame, min_periods=1, center=True).mean())
return mean_values
@staticmethod
def calc_moving_std(x, winsize, fps):
std_values = []
w_frame = math.ceil(fps * (winsize / 1000))
std_values.append(x.rolling(w_frame, min_periods=1, center=True).std())
return std_values
def extract(self, ft_set, df_pose, ft_cfg_set):
extraction_functions = {
"pose": self._extract_pose,
"angle": self._extract_angle,
"distance": self._extract_distance,
}
val = extraction_functions[ft_set](df_pose, ft_cfg_set)
return val
def get_column_names(self, ft_set):
stft_cfg = self.stft_cfg
name_col = []
def get_stft_name(defn):
if isinstance(defn, dict):
name = (
list(defn.keys())[0]
+ "("
+ ",".join(["-".join(item) for item in list(defn.values())[0]])
+ ")"
)
elif isinstance(defn, list):
name = "-".join(defn)
else:
raise ValueError(
f"Given feature definition {defn} has incorrect formatting."
)
return name
if not stft_cfg.get(ft_set, False):
raise ValueError(f"Unkown value {ft_set} is given for feature set.")
if "pose" in ft_set:
ft_names = list(
itertools.chain.from_iterable(
([item + "_x"], [item + "_y"]) for item in stft_cfg[ft_set]
)
)
else:
ft_names = stft_cfg[ft_set]
if "delta" not in ft_set:
name_col = [ft_set + "." + get_stft_name(item) for item in ft_names]
else:
scales = stft_cfg["delta_scales"]
name_col = misc.flatten(
[
[
ft_set + "." + get_stft_name(item) + ".s" + str(t)
for item in ft_names
]
for t in scales
]
)
return name_col
@staticmethod
def _get_coord(df_pose, name, axis):
# Axis name x or y.
name_c = name + "_" + axis
if name_c in df_pose.columns:
coord = df_pose[name_c]
elif name == "origin":
coord = np.zeros(df_pose.shape[0])
else:
raise ValueError(f"No coordinate values can be found for {name}.")
return coord
def _extract_pose(self, df_pose, body_parts):
xy_pose_values = np.ndarray((df_pose.shape[0], len(body_parts) * 2))
if not isinstance(body_parts, list):
raise ValueError(
f"Given argument has type {type(body_parts)}."
+ "Pose features should be defined by a list of body-parts."
)
for i, bp in enumerate(body_parts):
if not isinstance(bp, str):
raise ValueError(
f"Given feature definition contains {bp}, which is not a body-part."
)
xy_pose_values[:, i * 2] = self.__class__._get_coord(df_pose, bp, "x")
xy_pose_values[:, i * 2 + 1] = self.__class__._get_coord(df_pose, bp, "y")
return xy_pose_values
def _extract_angle(self, df_pose, triplets):
angle_values = np.ndarray((df_pose.shape[0], len(triplets)))
def f_angle(x):
return self.angle_between(x[:2] - x[2:4], x[4:] - x[2:4])
def angle_along_axis(xy_values, angle_values):
for j in range(xy_values.shape[0]):
v1 = xy_values[j, :2] - xy_values[j, 2:4]
v2 = xy_values[j, 4:] - xy_values[j, 2:4]
angle_values[j, i] = self.angle_between(v1, v2)
return angle_values
for i, triplet in enumerate(triplets):
if isinstance(triplet, dict):
opt = list(triplet.keys())[0]
group = list(triplet.values())[0]
if len(group) > 0 and opt in ["avg", "min", "max"]:
angle_group = self._extract_angle(df_pose, group)
else:
raise ValueError(f"Given feature definition {triplet} is unknown.")
angle_values[:, i] = self.get_group_value(angle_group, opt)
else:
                xy_values = self._extract_pose(df_pose, triplet)
# angle_values[:, i] = np.apply_along_axis(f_angle, 1, xy_values)
# This is somehow faster.
                angle_values = angle_along_axis(xy_values, angle_values)
return angle_values
def _extract_distance(self, df_pose, pairs):
distance_values = np.ndarray((df_pose.shape[0], len(pairs)))
for i, pair in enumerate(pairs):
if isinstance(pair, dict):
opt = list(pair.keys())[0]
group = list(pair.values())[0]
if len(group) > 0 and opt in ["avg", "min", "max"]:
distance_group = self._extract_distance(df_pose, group)
else:
raise ValueError(f"Given feature definition {pair} is unkwon.")
distance_values[:, i] = self.get_group_value(distance_group, opt)
else:
xy_values = self._extract_pose(df_pose, pair)
diff_xy = xy_values[:, 2:4] - xy_values[:, :2]
distance_values[:, i] = np.sqrt(diff_xy[:, 0] ** 2 + diff_xy[:, 1] ** 2)
return distance_values
def _extract_moving_stat(self, df_stft, stft_names_dict, stat, winsizes):
if stat == "mean":
get_moving_stat = self.get_moving_mean
elif stat == "std":
get_moving_stat = self.get_moving_std
else:
raise ValueError(f"Unkown value {stat} is given for moving statistics.")
name_col = df_stft.columns
mv_stat = pd.concat(
itertools.chain(*map(lambda w: get_moving_stat(df_stft, w), winsizes)),
axis=1,
)
df_stat = pd.DataFrame(data=mv_stat)
stat_columns = misc.flatten(
[
[
stat + "." + stft_names_dict[name] + ".w" + str(w)
for name in name_col
]
for w in winsizes
]
)
name_dict = {i: stat_columns[i] for i in range(len(stat_columns))}
df_stat.columns = list(name_dict.keys())
return df_stat, name_dict
def extract_snap_stft(self, df_pose):
stft_cfg = self.stft_cfg
df_snap_list = []
for ft_set in self.stft_set:
if stft_cfg.get(ft_set, False):
temp_df = pd.DataFrame(self.extract(ft_set, df_pose, stft_cfg[ft_set]))
temp_df.columns = self.get_column_names(ft_set)
df_snap_list.append(temp_df)
if len(df_snap_list) <= 0:
raise ValueError(
"At least one snap feature must given in the feature configuration."
)
        df_snap = pd.concat(df_snap_list, axis=1)  # API: pandas.concat
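# --- Illustrative usage sketch (added; not part of the original module). The body-part names,
# --- config keys, and the df_pose frame below are assumptions for demonstration only.
# stft_cfg = {
#     "pose": ["head", "thorax"],
#     "distance": [["head", "thorax"]],
#     "angle": [["head", "thorax", "abdomen"]],
# }
# stft = SpatioTemporal(fps=30, stft_cfg=stft_cfg)
# df_snap = stft.extract_snap_stft(df_pose)  # df_pose needs columns such as 'head_x', 'head_y', ...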
import numpy as np
import pandas as pd
from pandas_datareader import data
import matplotlib.pyplot as plt
def load_data(ticker, start_date, end_date, output_file):
"""
a data loading function, using the Yahoo Finance API
"""
try:
        df = pd.read_pickle(output_file)  # API: pandas.read_pickle
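# Illustrative sketch (assumed, not the original code) of how such a cache-or-fetch loader
# typically continues: fall back to pandas-datareader and cache the result.
#     except FileNotFoundError:
#         df = data.DataReader(ticker, 'yahoo', start_date, end_date)
#         df.to_pickle(output_file)
#     return df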
import requests
import zipfile
import io
import pandas as pd
from datetime import datetime, timedelta
pd.set_option('display.width', None)  # API: pandas.set_option
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 12:55:16 2017
@author: rdk10
"""
import os
import pandas as pd
import sitchensis.Functions as f
import tkinter as tk
from tkinter.filedialog import askopenfilename
import pdb
############# Functions below #############################################
def getFileName():
cwd = os.getcwd()
root = tk.Tk()
root.lift()
root.attributes('-topmost',True)
filename = askopenfilename(initialdir = cwd ,title = "Select a tree file and directory", filetypes = [("Excel","*.xlsx"),("Excel","*.xlsm")]) #Ask user to pick files
root.after_idle(root.attributes,'-topmost',False)
root.withdraw()
fileName = filename.rsplit('/')[-1] #Excludes path to file
filePath = os.path.dirname(filename)
return(filePath, fileName)
def importExcelTree(fullFileName):
"""This section assumed one excel file per tree with a tabe for each type of measurements (trunk, segment, or branch)"""
#Import data all at once
treeData = pd.read_excel(fullFileName, sheet_name = None) #,converters={'name':str,'ref':str, 'referenceType':str})
    #### IMPORTANT: if the pandas version is < 0.21 then sheet_name is not recognized and needs to be 'sheetname'; it is better to update pandas
#list of dictionary keys
keys = [key for key in treeData]
#This tests for types of data present and assigns keys
if any(['trunk' in t.lower() for t in treeData]):
trunkKey = keys[[i for i, key in enumerate(keys) if 'trunk' in key.lower()][0]]
if len(treeData[trunkKey])>0:
trunkBool = True
else:trunkBool = False
else:trunkBool = False
if any(['seg' in t.lower() for t in treeData]):
segKey = keys[[i for i, key in enumerate(keys) if 'seg' in key.lower()][0]]
if len(treeData[segKey])>0:
segBool = True
else:segBool = False
else:segBool = False
if any(['branch' in t.lower() for t in treeData]):
brKey = keys[[i for i, key in enumerate(keys) if 'branch' in key.lower()][0]]
if len(treeData[brKey])>0:
branchBool = True
else:branchBool = False
else:branchBool = False
#Assign declination to variable
if any(['declin' in t.lower() for t in treeData]):
if len([i for i, key in enumerate(keys) if 'declin' in key.lower()])>0:
declinKey = keys[[i for i, key in enumerate(keys) if 'declin' in key.lower()][0]]
declinRefs = pd.read_excel(fullFileName, sheet_name = declinKey ,converters={'name':str})
declinRefs.columns = [x.lower() for x in declinRefs.columns]
declination = declinRefs['declination'].iloc[0] #extract number
declination = declination.item() # convert to python float from numpy.float64
else:
declination = 0.00
#Assign cust refs to dataFrame
if len([i for i, key in enumerate(keys) if 'cust' in key.lower()])>0:
custKey = keys[[i for i, key in enumerate(keys) if 'cust' in key.lower()][0]]
custRefs = pd.read_excel(fullFileName, sheet_name = custKey ,converters={'name':str})
custRefs.columns = [x.lower() for x in custRefs.columns]
custRefs['azi'] = custRefs['azi'] + declination
custRefs = f.calcCustRefs(custRefs)
else:
custRefs = []
#Saves the data if it exists and makes changes to columns so they work in the program
if trunkBool:
trunkDat = pd.read_excel(fullFileName, sheet_name = trunkKey, converters={'name':str,'ref':str})#, 'ref type':str})
trunkDat.columns = [x.lower() for x in trunkDat.columns]
trunkDat['name'] = trunkDat['name'].str.upper()
trunkDat['name'] = trunkDat['name'].str.replace(" ","")
trunkDat['azi'] = trunkDat['azi'] + declination
if any(pd.isnull(trunkDat.index)):
trunkDat = trunkDat.reset_index(drop = True)
if segBool:
segs = pd.read_excel(fullFileName, parse_dates = False, sheet_name = segKey, converters={'name':str,'O/E':str,'base ref':str, 'top ref':str,'midsegment ref':str})
segs.columns = [x.lower() for x in segs.columns]
segs['name'] = segs['name'].str.replace(" ","")
if segs['base azi'].dtype == 'O':
print("Make sure there is no text in the 'base azi' column such as 'CALC' \n or there will be problems later")
else:
segs['base azi'] = segs['base azi'] + declination
segs['top azi'] = segs['top azi'] + declination
if any(pd.isnull(segs.index)):
segs = segs.reset_index(drop = True)
if 'base ht' in segs.columns and 'top ht' in segs.columns:
segs['base z'] = segs['base ht']
segs['top z'] = segs['top ht']
else:
print("Warning: you must have segment columns labeled 'base ht' and 'top ht'")
if any(pd.isnull(segs['name'])):
            print('There is at least one missing name in the segments file, please rectify this.')
names = f.splitName(segs['name'])
segs['top name'] = names['topNames']
segs['base name'] = names['baseNames']
if branchBool:
        branches = pd.read_excel(fullFileName, parse_dates = False , sheet_name = brKey, converters={'name':str,'O/E':str, 'L/D':str,'origin':str,'base ref':str, 'top ref':str,'midsegment ref':str})  # API: pandas.read_excel
import pyqtgraph as pg
from .__function__ import Function as _F
from scipy import signal
import pandas as pd
import copy
import numpy as np
class Function(_F):
def calc(self, srcSeries, f_Hz, **kwargs):
f_Hz = float(f_Hz)
fs = 1.0/srcSeries.attrs["_ts"]
order = int(kwargs.get("order",2))
fnorm = f_Hz/(fs/2)
b,a = signal.bessel(order,fnorm,'low')
newvals = signal.filtfilt(b,a,np.gradient(srcSeries))
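        # Note (added for clarity): signal.bessel takes the cutoff normalised to the Nyquist
        # frequency (fnorm = f_Hz / (fs / 2)), and signal.filtfilt runs the filter forward and
        # backward, so the smoothed derivative has zero phase lag.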
        newseries = pd.Series(newvals, index=srcSeries.index)  # API: pandas.Series
import numpy as np
import os
import pandas as pd
import PySimpleGUI as sg
import csv
class Demonstrativo_cartao:
def __init__(self, nome, validade, lista_devedores, lista_compras, valores=None):
self.nome = nome
self.validade = validade
self.lista_devedores = lista_devedores
self.lista_compras = lista_compras
self.valores = valores
def criar_valores(self):
valores = []
linha = []
linha.append('A')
for i in (range(len(self.lista_compras))):
linha.append(0.0)
for i in range(len(self.lista_devedores)):
valores.append(linha[:])
cont = 0
for i in range(len(self.lista_devedores)):
valores[cont][0] = self.lista_devedores[cont]
cont += 1
print(cont)
return valores
def criar_dataframe(nome, vencimento, ano,mes_referencia, lista_devedores, lista_compras):
if not os.path.exists(os.getcwd() + '/tabelas/'):
os.makedirs(os.getcwd() + '/tabelas/')
lista_devedores_final = ['PARCELA ATUAL','TOTAL DE PARCELAS']
for i in lista_devedores:
lista_devedores_final.append(i)
print(lista_devedores_final)
print(lista_compras)
tabela = pd.DataFrame(columns=lista_compras, index=lista_devedores_final)
tabela.index.name = 'DEVEDORES'
for col in tabela.columns:
tabela[col].values[:] = 0.0
tabela.to_csv(os.getcwd() + '/tabelas/' + ano + '_' + mes_referencia + '_' + vencimento +'_' + nome + '.csv', sep=',', encoding='utf-8')
return True
def criar_planilha(filename):
# --- populate table with file contents --- #
if filename == '':
return
data = []
header_list = []
if filename is not None:
with open(filename, "r") as infile:
reader = csv.reader(infile)
header_list = next(reader)
data = list(reader)
return header_list, data
def lista_pagadores(valores):
pagadores = ''
for i in valores:
pagadores = pagadores + '-' + i[0]
pagadores = pagadores.replace('-PARCELA ATUAL-TOTAL DE PARCELAS-','')
return pagadores
def atualizar_compra(arquivo, opção, texto):
planilha = pd.read_csv(arquivo)
if opção == 'Alterar':
nomeantigo, nomenovo = texto.split()
planilhanova = planilha.rename(columns={nomeantigo: nomenovo})
planilha = pd.DataFrame(planilhanova)
elif opção == 'Remover':
planilha = pd.DataFrame(planilha)
del planilha[texto]
elif opção == 'Adicionar':
planilha = planilha.assign(novo=0.0)
nomecolunanova = 'novo'
planilha = planilha.rename(columns={nomecolunanova: texto})
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
def atualizar_devedor(arquivo, opção, indice, nomenovo):
    planilha = pd.read_csv(arquivo)  # API: pandas.read_csv
import pandas as pd
import xlrd
import sys
from ross.materials import Material
class DataNotFoundError(Exception):
"""
An exception indicating that the data could not be found in the file.
"""
pass
def read_table_file(file, element, sheet_name=0, n=0, sheet_type="Model"):
"""Instantiate one or more element objects using inputs from an Excel table.
Parameters
----------
file: str
Path to the file containing the shaft parameters.
element: str
Specify the type of element to be instantiated: bearing, shaft or disk.
sheet_name: int or str, optional
Position of the sheet in the file (starting from 0) or its name. If none is passed, it is
assumed to be the first sheet in the file.
n: int
Exclusive for bearing elements, since this parameter is given outside the table file.
sheet_type: str
Exclusive for shaft elements, as they have a Model table in which more information can be passed,
such as the material parameters.
Returns
-------
A dictionary of parameters.
Examples
--------
>>> import os
>>> file_path = os.path.dirname(os.path.realpath(__file__)) + '/tests/data/shaft_si.xls'
>>> read_table_file(file_path, "shaft", sheet_type="Model", sheet_name="Model") # doctest: +ELLIPSIS
{'L': [0.03...
"""
df = pd.read_excel(file, header=None, sheet_name=sheet_name)
# Assign specific values to variables
parameter_columns = {}
optional_parameter_columns = {}
header_key_word = ''
default_dictionary = {}
parameters = {}
if element == 'bearing':
header_key_word = 'kxx'
parameter_columns['kxx'] = ['kxx']
parameter_columns['cxx'] = ['cxx']
optional_parameter_columns['kyy'] = ['kyy']
optional_parameter_columns['kxy'] = ['kxy']
optional_parameter_columns['kyx'] = ['kyx']
optional_parameter_columns['cyy'] = ['cyy']
optional_parameter_columns['cxy'] = ['cxy']
optional_parameter_columns['cyx'] = ['cyx']
optional_parameter_columns['w'] = ['w', 'speed']
default_dictionary['kyy'] = None
default_dictionary['kxy'] = 0
default_dictionary['kyx'] = 0
default_dictionary['cyy'] = None
default_dictionary['cxy'] = 0
default_dictionary['cyx'] = 0
default_dictionary['w'] = None
elif element == 'shaft':
if sheet_type == 'Model':
header_key_word = 'od_left'
else:
header_key_word = 'material'
parameter_columns['L'] = ['length']
parameter_columns['i_d'] = ['i_d', 'id', 'id_left']
parameter_columns['o_d'] = ['o_d', 'od', 'od_left']
parameter_columns['material'] = ['material', 'matnum']
optional_parameter_columns['n'] = ['n', 'elemnum']
optional_parameter_columns['axial_force'] = ['axial_force', 'axial force', 'axial']
optional_parameter_columns['torque'] = ['torque']
optional_parameter_columns['shear_effects'] = ['shear_effects', 'shear effects']
optional_parameter_columns['rotary_inertia'] = ['rotary_inertia', 'rotary inertia']
optional_parameter_columns['gyroscopic'] = ['gyroscopic']
optional_parameter_columns['shear_method_calc'] = ['shear_method_calc', 'shear method calc']
default_dictionary['n'] = None
default_dictionary['axial_force'] = 0
default_dictionary['torque'] = 0
default_dictionary['shear_effects'] = True
default_dictionary['rotary_inertia'] = True
default_dictionary['gyroscopic'] = True
default_dictionary['shear_method_calc'] = 'cowper'
elif element == 'disk':
header_key_word = 'ip'
parameter_columns['n'] = ['unnamed: 0', 'n']
parameter_columns['m'] = ['m', 'mass']
parameter_columns['Id'] = ['it', 'id']
parameter_columns['Ip'] = ['ip']
# Find table header and define if conversion is needed
header_index = -1
header_found = False
convert_to_metric = False
convert_to_rad_per_sec = False
for index, row in df.iterrows():
for i in range(0, row.size):
if isinstance(row[i], str):
if not header_found:
if row[i].lower() == header_key_word:
header_index = index
header_found = True
if 'inches' in row[i].lower() or 'lbm' in row[i].lower():
convert_to_metric = True
if 'rpm' in row[i].lower():
convert_to_rad_per_sec = True
if header_found and convert_to_metric and convert_to_rad_per_sec:
break
if header_found and convert_to_metric:
break
if not header_found:
raise ValueError("Could not find the header. Make sure the table has a header "
"containing the names of the columns. In the case of a " + element + ", "
"there should be a column named " + header_key_word + ".")
# Get specific data from the file
new_materials = {}
if element == 'shaft' and sheet_type == 'Model':
material_header_index = -1
material_header_found = False
material_header_key_word = 'matno'
for index, row in df.iterrows():
for i in range(0, row.size):
if isinstance(row[i], str):
if row[i].lower() == material_header_key_word:
material_header_index = index
material_header_found = True
break
if material_header_found:
break
if not material_header_found:
raise ValueError("Could not find the header for the materials. Make sure the table has a header "
"with the parameters for the materials that will be used. There should be a column "
"named " + material_header_key_word + ".")
df_material = pd.read_excel(file, header=material_header_index, sheet_name=sheet_name)
material_name = []
material_rho = []
material_e = []
material_g_s = []
for index, row in df_material.iterrows():
if not pd.isna(row['matno']):
material_name.append(int(row['matno']))
material_rho.append(row['rhoa'])
material_e.append(row['ea'])
material_g_s.append(row['ga'])
else:
break
if convert_to_metric:
for i in range(0, len(material_name)):
material_rho[i] = material_rho[i] * 27679.904
material_e[i] = material_e[i] * 6894.757
material_g_s[i] = material_g_s[i] * 6894.757
for i in range(0, len(material_name)):
new_material = Material(
name='shaft_mat_' + str(material_name[i]),
rho=material_rho[i],
E=material_e[i],
G_s=material_g_s[i],
)
new_materials['shaft_mat_' + str(material_name[i])] = new_material
df = pd.read_excel(file, header=header_index, sheet_name=sheet_name)
df.columns = df.columns.str.lower()
# Find and isolate data rows
first_data_row_found = False
last_data_row_found = False
indexes_to_drop = []
for index, row in df.iterrows():
if not first_data_row_found \
and (isinstance(row[header_key_word], int) or isinstance(row[header_key_word], float)) \
                and not pd.isna(row[header_key_word]):  # API: pandas.isna
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
        # Sometimes the descriptions of the accounts have small differences for the
        # same account in different periods, such as punctuation. The purpose of df_index
        # is to keep only one description for each account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
# Due to the command line 'df = df.sort_values([('dt_fim_exerc'), ('value')],
# axis=1, ascending=False)'
        # the columns are ordered by date descending and value descending. The purpose
# here is to set the order as: date descending and value ascending
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
except:
shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_net_income
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Líquido das Operações Continuadas' OR ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Líquido das Operações Continuadas"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Lucro/Prejuízo do Período"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_net_income"] = df["ytd_net_income"] - \
df["ytd_net_income"].shift(1)
df["quarter_net_income"][df["fiscal_quarter"] == 1] = df["ytd_net_income"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_net_income"] = (
df["quarter_net_income"].rolling(window=4, min_periods=4).sum()
)
if quarter == False:
df = df.drop(["quarter_net_income"], axis=1)
if ytd == False:
df = df.drop(["ytd_net_income"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Net Income (R$,000) ')
return df
def ebit(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebit information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebit", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_ebit
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR ds_conta = 'Resultado Operacional')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["quarter_ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_ebit"] = df["quarter_ebit"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebit"], axis=1)
if ytd == False:
df = df.drop(["ytd_ebit"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBIT (R$,000) ')
return df
def depre_amort(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the depreciationa and amortization information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="depre_amort", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_d_a
FROM dva
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND ds_conta = 'Depreciação, Amortização e Exaustão'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["quarter_d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
if ttm == True:
df["ttm_d_a"] = df["quarter_d_a"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_d_a"], axis=1)
if ytd == False:
df = df.drop(["ytd_d_a"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo, bars=' D&A (R$,000)')
return df
def ebitda(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebitda information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebitda", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dre.dt_fim_exerc AS date,
dre.fiscal_quarter,
dre.ds_conta,
dre.vl_conta AS ytd_ebit,
dva.vl_conta AS ytd_d_a
FROM dre
LEFT JOIN dva ON (dre.dt_fim_exerc=dva.dt_fim_exerc AND dre.grupo_dfp=dva.grupo_dfp AND dre.cnpj=dva.cnpj)
WHERE dre.cnpj = '{self.cnpj}'
AND dre.grupo_dfp = '{self.grupo}'
AND dre.dt_fim_exerc >= '{begin_period.date()}'
AND (dre.ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR dre.ds_conta = 'Resultado Operacional')
AND dva.ds_conta = 'Depreciação, Amortização e Exaustão'
ORDER BY dre.dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df["ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
df["d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
df["quarter_ebitda"] = df["ebit"] - df["d_a"]
if ttm == True:
df["ttm_ebitda"] = df["quarter_ebitda"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebitda"], axis=1)
if ytd == True:
df["ytd_ebitda"] = df["ytd_ebit"] - df["ytd_d_a"]
        df = df[df.index >= begin_period + pd.DateOffset(months=12)]  # API: pandas.DateOffset
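# Illustrative usage sketch (added; the ticker symbol below is an assumption for demonstration):
# petr = Ticker("PETR4")
# petr.income_statement(start_period="2019-01-01")
# petr.ebitda(ttm=True, plot=True)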
import pandas as pd
import sqlite3
import numpy as np
def hampel_filter(df, col, k, threshold=1):
df['rolling_median'] = df[col].rolling(k).median()
df['rolling_std'] = df[col].rolling(k).std()
df['num_sigma'] = abs(df[col]-df['rolling_median'])/df['rolling_std']
df[col] = np.where(df['num_sigma'] > threshold, df['rolling_median'], df[col])
del df['rolling_median']
del df['rolling_std']
del df['num_sigma']
return df
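# Illustrative usage sketch (added; synthetic data, not part of the original workflow):
# demo = pd.DataFrame({"GWL": np.random.default_rng(0).normal(0.0, 1.0, 2000)})
# demo.iloc[500, 0] = 25.0                                # inject an outlier
# demo = hampel_filter(demo, "GWL", k=1440, threshold=3)  # one-day window for 1-minute data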
con = sqlite3.connect("C:/Users/<NAME>/PycharmProjects/hampt_rd_data (2).sqlite")
# Total hourly rainfall from gauge 036
rain_sql_036 = "SELECT * FROM datavalues WHERE SiteID=21 AND VariableID=1"
rain_df_036 = pd.read_sql_query(rain_sql_036, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_036.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_036.columns = ["MMPS-036"]
rain_df_036.sort_index(inplace=True)
rain_sum_036 = rain_df_036.resample("H").sum()
# Total hourly rainfall from gauge 039
rain_sql_039 = "SELECT * FROM datavalues WHERE SiteID=13 AND VariableID=1"
rain_df_039 = pd.read_sql_query(rain_sql_039, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_039.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_039.columns = ["MMPS-039"]
rain_df_039.sort_index(inplace=True)
rain_sum_039 = rain_df_039.resample("H").sum()
# Total hourly rainfall from gauge 140
rain_sql_140 = "SELECT * FROM datavalues WHERE SiteID=15 AND VariableID=1"
rain_df_140 = pd.read_sql_query(rain_sql_140, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_140.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_140.columns = ["MMPS-140"]
rain_df_140.sort_index(inplace=True)
rain_sum_140 = rain_df_140.resample("H").sum()
# Total hourly rainfall from gauge 149
rain_sql_149 = "SELECT * FROM datavalues WHERE SiteID=7 AND VariableID=1"
rain_df_149 = pd.read_sql_query(rain_sql_149, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_149.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_149.columns = ["MMPS-149"]
rain_df_149.sort_index(inplace=True)
rain_sum_149 = rain_df_149.resample("H").sum()
# Total hourly rainfall from gauge 163
rain_sql_163 = "SELECT * FROM datavalues WHERE SiteID=16 AND VariableID=1"
rain_df_163 = pd.read_sql_query(rain_sql_163, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_163.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_163.columns = ["MMPS-163"]
rain_df_163.sort_index(inplace=True)
rain_sum_163 = rain_df_163.resample("H").sum()
# Total hourly rainfall from gauge 175
rain_sql_175 = "SELECT * FROM datavalues WHERE SiteID=11 AND VariableID=1"
rain_df_175 = pd.read_sql_query(rain_sql_175, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_175.drop(["ValueID", "VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_175.columns = ["MMPS-175"]
rain_df_175.sort_index(inplace=True)
rain_sum_175 = rain_df_175.resample("H").sum()
# Total hourly rainfall from gauge 177
rain_sql_177 = "SELECT * FROM datavalues WHERE SiteID=12 AND VariableID=1"
rain_df_177 = pd.read_sql_query(rain_sql_177, con, index_col="Datetime", parse_dates=['Datetime'])
rain_df_177.drop(["ValueID","VariableID", "QCID", "SiteID"], axis=1, inplace=True)
rain_df_177.columns = ["MMPS-177"]
rain_df_177.sort_index(inplace=True)
rain_sum_177 = rain_df_177.resample("H").sum()
# # hourly tide for Sewell's Point
# tide_sql = "SELECT * FROM datavalues WHERE SiteID=17 AND VariableID=5"
# tide_df = pd.read_sql_query(tide_sql, con,index_col="Datetime", parse_dates=['Datetime'])
# tide_df.drop(["ValueID","VariableID","QCID","SiteID"], axis=1, inplace=True)
# tide_df.columns=["Tide"]
# tide_df.sort_index(inplace=True)
# tide_mean = tide_df.resample("H").mean()
# # Well MMPS-...
# gw_sql = "SELECT * FROM datavalues WHERE SiteID=9 AND VariableID=4"
# gw_df = pd.read_sql_query(gw_sql, con,index_col="Datetime", parse_dates=['Datetime'])
# gw_df.drop(["ValueID","VariableID","QCID","SiteID"], axis=1, inplace=True)
# gw_df.columns = ["GWL"]
# gw_df.sort_index(inplace=True)
# gw_hampel = hampel_filter(gw_df,"GWL",720)#Filtering for one day is 720 time steps
# gw_mean = gw_hampel.resample("H").mean()
# df = pd.concat([gw_mean,tide_mean,rain_sum_039,rain_sum_175])
# df = pd.merge(gw_mean,tide_mean,left_index=True,right_index=True)
# df = pd.merge(df, rain_sum_003, left_index=True, right_index=True)
df = pd.merge(rain_sum_036, rain_sum_039, left_index=True, right_index=True)  # API: pandas.merge
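# Illustrative sketch (assumed continuation, not the original code) of chaining in the
# remaining gauges with index-aligned merges:
# from functools import reduce
# df = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True),
#             [df, rain_sum_140, rain_sum_149, rain_sum_163, rain_sum_175, rain_sum_177])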
import pandas as pd
import sqlite3
class Co2:
    # ind_name -> industry name
def ind_name(self,ind):
con = sqlite3.connect('./sorting.db')
df = pd.read_sql_query('select * from sorting',con)
df2 = df[['sort','industry']]
df3 = df2[df2['industry'] == ind]
result = df3['sort'].tolist()
return result
    # graph -> extracts the data used for the graph
def graph(self,ind,sido):
#df = pd.read_excel('../../data/calc2_result_graph.xlsx',engine='openpyxl', index_col=0);
con = sqlite3.connect('./sorting.db')
df = pd.read_sql_query('select * from calc2_result_graph',con)
df = df.set_index('time')
graph_col = ind + sido
for i in df:
if i == graph_col:
df = df[graph_col]
df2 = df.transpose()
result = df2[['year_sum','tanso','tree']].tolist()
return result
########################################################
#---------------------BASE--------------------------------#
    # Default sort by values at or below the reduction amount (1st priority: reduction amount / 2nd: cost savings) #
########################################################
def sol1_alt(self,ind,user_co2):
# django 돌릴때 경로
con = sqlite3.connect('./sorting.db')
# 테스트로 돌릴때 경로
# con = sqlite3.connect('../../sorting.db')
df = pd.read_sql_query('select * from top_8_final', con)
user_co2 = float(user_co2)
# 제조업
if ind == 'manufacture':
df_manu = df[df['sort'] == '제조업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_manu['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_manu_except = df_manu.tail(5);
df_manu_except2 = df_manu_except['deco2'].tolist();
for i in df_manu_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_manu[df_manu['deco2']==i]);
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist();
result_data2 = df3['act'].tolist();
result_data3 = df3['money'].tolist();
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Construction
elif ind == 'building':
df_bulid = df[df['sort'] == '건설업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_bulid['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_bulid_except = df_bulid.tail(5);
df_bulid_except2 = df_bulid_except['deco2'].tolist();
for i in df_bulid_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_bulid[df_bulid['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Wholesale and retail
elif ind == 'retail':
df_retail = df[df['sort'] == '도매 및 소매업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_retail['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_retail_except = df_retail.tail(5);
df_retail_except2 = df_retail_except['deco2'].tolist();
for i in df_retail_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_retail[df_retail['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Mining
elif ind == 'mining':
df_mining = df[df['sort'] == '광업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_mining['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_mining_except = df_mining.tail(5);
df_mining_except2 = df_mining_except['deco2'].tolist();
for i in df_mining_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_mining[df_mining['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Transportation
elif ind == 'transportation':
df_transport = df[df['sort'] == '운수업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_transport['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_transport_except = df_transport.tail(5);
df_transport_except2 = df_transport_except['deco2'].tolist();
for i in df_transport_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_transport[df_transport['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Waste and recycling
elif ind == 'recycle':
df_recycle = df[df['sort'] == '폐기물 및 재생사업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_recycle['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_recycle_except = df_recycle.tail(5);
df_recycle_except2 = df_recycle_except['deco2'].tolist();
for i in df_recycle_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_recycle[df_recycle['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Power generation/energy and water utilities
elif ind == 'energy':
df_energy = df[df['sort'] == '발전에너지, 수도사업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_energy['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_energy_except = df_energy.tail(5);
df_energy_except2 = df_energy_except['deco2'].tolist();
for i in df_energy_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_energy[df_energy['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Agriculture, forestry, and fisheries
elif ind == 'primary':
df_primary = df[df['sort'] == '농림어업']
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_primary['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_primary_except = df_primary.tail(5);
df_primary_except2 = df_primary_except['deco2'].tolist();
for i in df_primary_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_primary[df_primary['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
return result
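    # Illustrative usage sketch (added; the industry key and CO2 value are assumptions):
    # co2 = Co2()
    # top5 = co2.sol1_alt(ind='manufacture', user_co2=1500.0)
    # # 'top5' holds 20 items: 5 reduction values, 5 action names, 5 savings figures, 5 row ids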
########################################################
    # Sort by values at or below the reduction amount (ranked by energy savings (fuel)) #
########################################################
def sol1_alt_fuel(self,ind,user_co2,sort_base):
        # Path when running under Django
con = sqlite3.connect('./sorting.db')
        # Path when running locally for testing
# con = sqlite3.connect('../../sorting.db')
df = pd.read_sql_query('select * from top_8_final', con)
for i in range(15,20):
df_co2 = df[df['no'] == sort_base[i]].index
df = df.drop(df_co2)
user_co2 = float(user_co2)
        # Manufacturing
if ind == 'manufacture':
df_manu = df[df['sort'] == '제조업']
df_manu = df_manu.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_manu['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_manu_except = df_manu.tail(5);
df_manu_except2 = df_manu_except['deco2'].tolist();
for i in df_manu_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_manu[df_manu['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Construction
elif ind == 'building':
df_bulid = df[df['sort'] == '건설업']
df_bulid = df_bulid.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
df3 = pd.DataFrame()
for i in df_bulid['deco2']:
if i < user_co2:
data.append(i);
try:
for i in range(0,5):
data2.append(data[i]);
except:
df_bulid_except = df_bulid.tail(5);
df_bulid_except2 = df_bulid_except['deco2'].tolist();
for i in df_bulid_except2:
data2.append(i);
for i in data2:
df3 = df3.append(df_bulid[df_bulid['deco2']==i])
df3 = df3.head(5)
result_data1 = df3['deco2'].tolist()
result_data2 = df3['act'].tolist()
result_data3 = df3['money'].tolist()
result_data4 = df3['no'].tolist();
result = result_data1 + result_data2 + result_data3 + result_data4;
        # Wholesale and retail
elif ind == 'retail':
df_retail = df[df['sort'] == '도매 및 소매업']
df_retail = df_retail.sort_values(by=['에너지 절감량(연료)'], ascending=[False])
data = [];
data2 = [];
            df3 = pd.DataFrame()  # API: pandas.DataFrame
import pandas as pd
stocks = pd.Series([20.1, 100.0, 66.5], index=['tx', 'tobao', 'apple'])  # API: pandas.Series
# pylint: disable=bad-continuation
"""
Defines the Targetted Maximum likelihood Estimation (TMLE) model class
"""
from pprint import pprint
import numpy as np
import pandas as pd
from pandas.api.types import is_float_dtype, is_numeric_dtype
from scipy.interpolate import interp1d
from scipy.stats import norm
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from statsmodels.genmod.generalized_linear_model import GLM
from causal_curve.core import Core
from causal_curve.utils import rand_seed_wrapper
class TMLE(Core):
"""
Constructs a causal dose response curve through a series of TMLE comparisons across a grid
of the treatment values. Gradient boosting is used for prediction in Q model and G model.
Assumes continuous treatment and outcome variable.
WARNING:
-In choosing `treatment_grid_bins` be very careful to respect the "positivity" assumption.
There must be sufficient data and variability of treatment within each bin the treatment
is split into.
-This algorithm assumes you've already performed the necessary transformations to
categorical covariates (i.e. these variables are already one-hot encoded and
one of the categories is excluded for each set of dummy variables).
-Please take care to ensure that the "ignorability" assumption is met (i.e.
all strong confounders are captured in your covariates and there is no
informative censoring), otherwise your results will be biased, sometimes strongly so.
Parameters
----------
treatment_grid_bins: list of floats or ints
Represents the edges of bins of treatment values that are used to construct a smooth curve
Look at the distribution of your treatment variable to determine which
family is more appropriate. Be mindful of the "positivity" assumption when determining
bins. In other words, make sure there aren't too few data points in each bin. Mean
treatment values between the bin edges will be used to generate the CDRC.
n_estimators: int, optional (default = 100)
Optional argument to set the number of learners to use when sklearn
creates TMLE's Q and G models.
learning_rate: float, optional (default = 0.1)
Optional argument to set the sklearn's learning rate for TMLE's Q and G models.
max_depth: int, optional (default = 5)
Optional argument to set sklearn's maximum depth when creating TMLE's Q and G models.
random_seed: int, optional (default = None)
Sets the random seed.
verbose: bool, optional (default = False)
Determines whether the user will get verbose status updates.
Attributes
----------
psi_list: array of shape (len(treatment_grid_bins) - 2, )
The estimated causal difference between treatment bins
std_error_ic_list: array of shape (len(treatment_grid_bins) - 2, )
The standard errors for the psi estimates found in `psi_list`
Methods
----------
fit: (self, T, X, y)
Fits the causal dose-response model
calculate_CDRC: (self, ci, CDRC_grid_num)
Calculates the CDRC (and confidence interval) from TMLE estimation
Examples
--------
>>> from causal_curve import TMLE
>>> tmle = TMLE(treatment_grid_bins = [0, 0.03, 0.05, 0.25, 0.5, 1.0, 2.0],
n_estimators=500,
learning_rate = 0.1,
max_depth = 5,
random_seed=111
)
>>> tmle.fit(T = df['Treatment'], X = df[['X_1', 'X_2']], y = df['Outcome'])
>>> tmle_results = tmle.calculate_CDRC(0.95)
References
----------
<NAME> and <NAME>. Targeted maximum likelihood learning. In: The International
Journal of Biostatistics, 2(1), 2006.
<NAME> and <NAME>. Collaborative double robust penalized targeted
maximum likelihood estimation. In: The International Journal of Biostatistics 6(1), 2010.
"""
def __init__(
self,
treatment_grid_bins,
n_estimators=100,
learning_rate=0.1,
max_depth=5,
random_seed=None,
verbose=False,
):
self.treatment_grid_bins = treatment_grid_bins
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.max_depth = max_depth
self.random_seed = random_seed
self.verbose = verbose
# Validate the params
self._validate_init_params()
rand_seed_wrapper(self.random_seed)
if self.verbose:
print("Using the following params for TMLE model:")
pprint(self.get_params(), indent=4)
def _validate_init_params(self):
"""
Checks that the params used when instantiating TMLE model are formatted correctly
"""
# Checks for treatment_grid_bins
if not isinstance(self.treatment_grid_bins, list):
raise TypeError(
f"treatment_grid_bins parameter must be a list, "
f"but found type {type(self.treatment_grid_bins)}"
)
for element in self.treatment_grid_bins:
if not isinstance(element, (int, float)):
raise TypeError(
f"'{element}' in `treatment_grid_bins` list is not of type float or int, "
f"it is {type(element)}"
)
if len(self.treatment_grid_bins) < 2:
raise TypeError("treatment_grid_bins list must, at minimum, of length >= 2")
# Checks for n_estimators
if not isinstance(self.n_estimators, int):
raise TypeError(
f"n_estimators parameter must be an integer, "
f"but found type {type(self.n_estimators)}"
)
if (self.n_estimators < 10) or (self.n_estimators > 100000):
raise TypeError("n_estimators parameter must be between 10 and 100000")
# Checks for learning_rate
if not isinstance(self.learning_rate, (int, float)):
raise TypeError(
f"learning_rate parameter must be an integer or float, "
f"but found type {type(self.learning_rate)}"
)
if (self.learning_rate <= 0) or (self.learning_rate >= 1000):
raise TypeError(
"learning_rate parameter must be greater than 0 and less than 1000"
)
# Checks for max_depth
if not isinstance(self.max_depth, int):
raise TypeError(
f"max_depth parameter must be an integer, "
f"but found type {type(self.max_depth)}"
)
if self.max_depth <= 0:
raise TypeError("max_depth parameter must be greater than 0")
# Checks for random_seed
if not isinstance(self.random_seed, (int, type(None))):
raise TypeError(
f"random_seed parameter must be an int, but found type {type(self.random_seed)}"
)
if (isinstance(self.random_seed, int)) and self.random_seed < 0:
raise ValueError(f"random_seed parameter must be > 0")
# Checks for verbose
if not isinstance(self.verbose, bool):
raise TypeError(
f"verbose parameter must be a boolean type, but found type {type(self.verbose)}"
)
def _validate_fit_data(self):
"""Verifies that T, X, and y are formatted the right way"""
# Checks for T column
if not is_float_dtype(self.t_data):
raise TypeError(f"Treatment data must be of type float")
# Make sure all X columns are float or int
if isinstance(self.x_data, pd.Series):
if not is_numeric_dtype(self.x_data):
raise TypeError(
f"All covariate (X) columns must be int or float type (i.e. must be numeric)"
)
elif isinstance(self.x_data, pd.DataFrame):
for column in self.x_data:
if not is_numeric_dtype(self.x_data[column]):
raise TypeError(
"""All covariate (X) columns must be int or float type
(i.e. must be numeric)"""
)
# Checks for Y column
if not | is_float_dtype(self.y_data) | pandas.api.types.is_float_dtype |
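# The TMLE docstring above leans on the "positivity" assumption for
# `treatment_grid_bins`. A quick occupancy check before fitting can catch
# sparse bins early (illustrative sketch only; the simulated treatment and
# the bin edges are assumptions, not taken from the causal_curve library):
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
treatment = pd.Series(rng.gamma(shape=2.0, scale=0.2, size=1000), name="Treatment")
bins = [0, 0.03, 0.05, 0.25, 0.5, 1.0, 2.0]
bin_counts = pd.cut(treatment, bins=bins).value_counts().sort_index()
print(bin_counts)  # each bin should hold enough observations to estimate a within-bin effect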
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with | tm.ensure_clean() | pandas._testing.ensure_clean |
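# A standalone illustration of the MultiIndex-column round trip exercised by
# the tests above (not part of the pandas test suite; the frame contents and
# the temporary file name are made up):
import os
import tempfile

import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.arange(12).reshape(3, 4),
    columns=pd.MultiIndex.from_product([["bah", "ban"], ["foo", "bar"]]),
    dtype="int64",
)
path = os.path.join(tempfile.mkdtemp(), "roundtrip.csv")
df.to_csv(path)
roundtripped = pd.read_csv(path, header=[0, 1], index_col=0)
pd.testing.assert_frame_equal(df, roundtripped)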
#!/usr/bin/env python
# encoding: utf-8
import os.path
import pandas as pd
DATA_FILE_DIR = "input/raw/studies_by_council/"
OUTPUT = "input/generated/"
def read_data():
"""Create a summary dataframe based on concatenation of multiple Excel files.
Goes through all the xlsx files in the appropriate dir, converts each
one to a df and concatenates successively into a super df with all
the data
:params: none
:return: a dataframe formed from multiple dataframes
"""
new = True
for datafile in os.listdir(DATA_FILE_DIR):
if datafile.endswith('.xlsx'):
# Create the col name by dropping the file extension from the filename,
# and then adding the keyword "funder_"
new_col = 'funder_' + str(datafile)[:-5]
# Need this if block because the first iteration has to be a straight read rather than a concatenation
if new:
dataframe = pd.read_excel(DATA_FILE_DIR + str(datafile), sheet_name='CaseStudies')
dataframe = pd.DataFrame(dataframe['Case Study Id'])
dataframe[new_col] = new_col
new = False
else:
temp_df = pd.read_excel(DATA_FILE_DIR + str(datafile), sheet_name='CaseStudies')
# Knock out everything except the case study
temp_df = pd.DataFrame(temp_df['Case Study Id'])
temp_df[new_col] = new_col
# Use concat to add temp dataframe onto end of the main dataframe
dataframe = | pd.concat([dataframe, temp_df]) | pandas.concat |
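# The loop above grows `dataframe` inside the loop and special-cases the first
# file. An equivalent pattern collects the pieces and concatenates once
# (illustrative sketch; assumes the same directory layout and a 'CaseStudies'
# sheet in each workbook):
import os
import pandas as pd

def read_data_concat_once(data_dir="input/raw/studies_by_council/"):
    pieces = []
    for datafile in sorted(os.listdir(data_dir)):
        if not datafile.endswith('.xlsx'):
            continue
        new_col = 'funder_' + datafile[:-5]
        piece = pd.read_excel(data_dir + datafile, sheet_name='CaseStudies')
        piece = piece[['Case Study Id']].copy()
        piece[new_col] = new_col
        pieces.append(piece)
    return pd.concat(pieces) if pieces else pd.DataFrame()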
import numpy as np
import scipy
import pandas as pd
import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
from astroquery.mast import Catalogs
from astroquery.gaia import Gaia
import requests
import re
from stellar.isoclassify import classify, pipeline
import os
import sys
import warnings
warnings.filterwarnings("ignore")
stellar_path = os.path.dirname(os.path.abspath( __file__ ))
def QueryGaiaAndSurveys(sc,CONESIZE=15*u.arcsec,savefile=None,mission='tess'):
#Getting Gaia DR2 RVs:
job = Gaia.launch_job_async("SELECT * \
FROM gaiadr2.gaia_source \
WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec),\
CIRCLE('ICRS',"+str(float(sc.ra.deg))+","+str(float(sc.dec.deg))+","+str(CONESIZE.to(u.deg).value)+"))=1;",verbose=False)
gaia_res=job.get_results().to_pandas()
#"closeness" function combines proximity of source to RA and DEC (as function of CONESIZE)
# *AND* brightness (scaled 1 arcsec ~ 9th mag, or 2*sep ~ deltamag=2.3)
closeness=(np.hypot(sc.ra.deg-gaia_res.ra,sc.dec.deg-gaia_res.dec)/CONESIZE.to(u.deg).value)*np.exp(0.3*(gaia_res.phot_g_mean_mag-18))
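#Worked example of the metric: with CONESIZE=15 arcsec, a neighbour 1.5 arcsec
#away at G=15.7 scores (1.5/15)*exp(0.3*(15.7-18)) ~= 0.05, while a G=18 source
#at the same separation scores 0.10, so brighter sources win the target slot.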
#Getting APASS results:
apasshtml="https://www.aavso.org/cgi-bin/apass_dr10_download.pl?ra={0:.4f}&dec={1:.4f}&radius={2:.4f}&output=csv".format(sc.ra.deg,sc.dec.deg,CONESIZE.to(u.deg).value)
out=requests.get(apasshtml)
if out.status_code==200:
#Modifying the output HTML to include a <tr> before the first row of header labels:
APASS=pd.read_html(out.text.replace('<table border=1>\n\t\t<td','<table border=1>\n<tr><td'),header=0)
if len(APASS)>0:
APASS=APASS[0]
else:
APASS=None
else:
APASS=None
#pd.DataFrame.from_csv("https://www.aavso.org/cgi-bin/apass_dr10_download.pl?ra="+str(SC.ra.deg)+"&dec="+str(SC.dec.deg)+"&radius="+str(CONESIZE/3600)+"&output=csv")
alldat=pd.DataFrame()
#Looping through Gaia results to find best match
for n,row in enumerate(gaia_res.iterrows()):
#Name of series becomes 00_gaiaid (for target) and then 01_gaiaid (for subsequent blends)
sername=str(list(np.sort(closeness)).index(closeness[n]))+'_'+str(row[1]['source_id'])
alldattemp=pd.Series({'mission':mission},name=sername)
#print(alldattemp)
alldattemp=alldattemp.append(gaia_res.iloc[n])
print("Querying catalogues. "+str(n)+" of "+str(len(gaia_res)))
#multiple rows, let's search using the Gaia RA/DECs
#newra=row[1]['ra']
#newdec=row[1]['dec']
#print(row[1]['designation'],"<desig, id>",int(row[1]['source_id']))
if APASS is not None:
#"closeness algorithm = [dist in arcsec]*exp(0.3*[delta mag])
closeness_apass=3600*np.hypot(row[1]['ra']-APASS['RA (deg)'],row[1]['dec']-APASS['Dec (deg)'])*np.exp(0.3*(row[1]['phot_g_mean_mag']-APASS['Sloan g\' (SG)']))
if np.min(closeness_apass)<2.5:
#Takes best APASS source if there is a source: (within 1 arcsec and deltamag<3) or (<2.5arcsec and deltamag=0.0)
#Appending APASS info:
nrby_apas=APASS.iloc[np.argmin(closeness_apass)]
nrby_apas=nrby_apas.rename(index={col:'ap_'+col for col in nrby_apas.index if col not in gaia_res.columns})
alldattemp=alldattemp.append(nrby_apas.drop([col for col in gaia_res.columns if col in nrby_apas.index]))
dr=int(row[1]['designation'].decode("utf-8")[7])
gid=row[1]['source_id']
#Now searching the cross-matched cats with the GAIA ID
jobs={}
jobs['2m'] = Gaia.launch_job_async("SELECT * \
FROM gaiadr"+str(dr)+".gaia_source AS g, gaiadr"+str(dr)+".tmass_best_neighbour AS tbest, gaiadr1.tmass_original_valid AS tmass \
WHERE g.source_id = tbest.source_id AND tbest.tmass_oid = tmass.tmass_oid \
AND g.source_id = "+str(gid), dump_to_file=False,verbose=False)
jobs['sd'] = Gaia.launch_job_async("SELECT * \
FROM gaiadr"+str(dr)+".gaia_source AS g, gaiadr"+str(dr)+".sdss"+"_"[:(2-dr)]+"dr9_best_neighbour AS sdbest, gaiadr1.sdssdr9_original_valid AS sdss \
WHERE g.source_id = sdbest.source_id AND sdbest.sdssdr9_oid = sdss.sdssdr9_oid \
AND g.source_id = "+str(gid), dump_to_file=False,verbose=False)
jobs['ur'] = Gaia.launch_job_async("SELECT * \
FROM gaiadr"+str(dr)+".gaia_source AS g, gaiadr"+str(dr)+".urat1_best_neighbour AS uratbest, gaiadr1.urat1_original_valid AS urat1 \
WHERE g.source_id = uratbest.source_id AND uratbest.urat1_oid = urat1.urat1_oid \
AND g.source_id = "+str(gid), dump_to_file=False,verbose=False)
jobs['wise'] = Gaia.launch_job_async("SELECT * \
FROM gaiadr"+str(dr)+".gaia_source AS g, gaiadr"+str(dr)+".allwise_best_neighbour AS wisest, gaiadr1.allwise_original_valid AS wise \
WHERE g.source_id = wisest.source_id AND wisest.allwise_oid = wise.allwise_oid \
AND g.source_id = "+str(gid), dump_to_file=False,verbose=False)
for job in jobs:
res=jobs[job].get_results().to_pandas()
if res.shape[0]>0:
#Making
res=res.rename(columns={col:job+'_'+col for col in res.columns if col not in gaia_res.columns})
alldattemp=alldattemp.append(res.iloc[0].drop([col for col in gaia_res.columns if col in res.columns]))
alldattemp=alldattemp.drop_duplicates()
#print(alldattemp,,job_sd.get_results().to_pandas(),
# job_ur.get_results().to_pandas(),job_wise.get_results().to_pandas())
#alldattemp=pd.concat([alldattemp,job_2m.get_results().to_pandas(),job_sd.get_results().to_pandas(),
# job_ur.get_results().to_pandas(),job_wise.get_results().to_pandas()],
# axis=1)
alldat=alldat.append(alldattemp.rename(sername))
alldat['dilution_ap']=np.tile(CONESIZE.to(u.arcsec).value,len(alldat))
alldat['prop_all_flux']=alldat['phot_g_mean_flux'].values/np.nansum(alldat['phot_g_mean_flux'].values)
alldat['diluted_by']=1.0-alldat['prop_all_flux']
if type(alldat)==pd.DataFrame and len(alldat)>1:
targ=alldat.iloc[np.argmin(closeness)]
elif type(alldat)==pd.DataFrame:
targ=alldat.iloc[0]
elif type(alldat)==pd.Series:
targ=alldat
if savefile is not None:
alldat.iloc[np.argsort(closeness)].to_csv(savefile.replace('.csv','_all_contams.csv'))
targ.to_csv(savefile)
#print(str(alldat.shape)," dic created with data from ",','.join([cats[i] if len(jobs[i])>0 else "" for i in range(5)]))
return targ
def GetKICinfo(kic):
#Getting Kepler stellar info from end-of-Kepler Q1-Q17 data table:
kicdat=pd.read_csv("https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=q1_q17_dr25_stellar&where=kepid=%27"+str(int(kic))+"%27")
if len(kicdat.shape)>1:
kicdat=kicdat.iloc[0]
for row in kicdat.index:
newname=row[:]
if 'dens' in row:
newname=newname.replace('dens','rho_gcm3')
elif 'radius' in row:
newname=newname.replace('radius','rad')
if '_err1' in row:
newname=newname.replace('_err1','ep')
elif '_err2' in row:
newname=newname.replace('_err2','em')
kicdat=kicdat.rename(index={row:newname})
try:
kicdat[newname]=float(kicdat[newname])
except:
continue
for row in kicdat.index:
#Adding simple average errors:
if 'em' in row and row[:-1] not in kicdat.index:
kicdat[row[:-1]]=0.5*(abs(kicdat[row])+abs(kicdat[row[:-1]+'p']))
#Adding rho in terms of solar density:
kicdat['rho']=kicdat['rho_gcm3']/1.411
kicdat['rhoep']=kicdat['rho_gcm3ep']/1.411
kicdat['rhoem']=kicdat['rho_gcm3em']/1.411
kicdat['rhoe']=0.5*(abs(kicdat['rhoep'])+abs(kicdat['rhoem']))
kicdat['rho_gcm3e']=1.411*kicdat['rhoe']
#kicdat=pd.DataFrame.from_csv("https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=keplerstellar&where=epic_number=%27"+str(int(kic))+"%27")
kicdat['mission']='kepler'
kicdat['id']=kicdat['kepid']
kicdat['spec']=None
kicdat['source']='kic'
return kicdat
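#Column-name convention used by GetKICinfo and the routines below: '<param>ep'
#and '<param>em' hold the upper/lower 1-sigma errors and '<param>e' their
#average, e.g. 'radep', 'radem', 'rade' (inferred from the renaming above).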
def GetExoFop(icid,mission='tess',file=''):
cols={'Telescope':'telescope','Instrument':'instrument','Teff (K)':'teff','Teff (K) Error':'teffe','log(g)':'logg',
'log(g) Error':'logge','Radius (R_Sun)':'rad','Radius (R_Sun) Error':'rade','logR\'HK':'logrhk',
'logR\'HK Error':'logrhke','S-index':'sindex','S-index Error':'sindexe','H-alpha':'haplha','H-alpha Error':'halphae',
'Vsini':'vsini','Vsini Error':'vsinie','Rot Per':'rot_per','Rot Per Error':'rot_pere','Metallicity':'feh',
'Metallicity Error':'fehe','Mass (M_Sun)':'mass','Mass (M_Sun) Error':'masse','Density (g/cm^3)':'rho_gcm3',
'Density (g/cm^3) Error':'rho_gcm3e','Luminosity':'lum','Luminosity Error':'lume','Observation Time (BJD)':'obs_time_bjd',
'RV (m/s)':'rv_ms','RV Error':'rv_mse','Distance (pc)':'dis','Distance (pc) Error':'dise',
'# of Contamination sources':'n_contams', 'B':'bmag', 'B Error':'bmage', 'Dec':'dec', 'Ecliptic Lat':'lat_ecl',
'Ecliptic Long':'long_ecl', 'Gaia':'gmag', 'Gaia Error':'gmage', 'Galactic Lat':'lat_gal', 'Galactic Long':'long_gal',
'H':'hmag', 'H Error':'hmage', 'In CTL':'in_ctl', 'J':'jmag', 'J Error':'jmage', 'K':'kmag', 'K Error':'kmage',
'Planet Name(s)':'planet_names', 'Proper Motion Dec (mas/yr)':'pm_dec',
'Proper Motion RA (mas/yr)':'pm_ra', 'RA':'ra', 'Star Name & Aliases':'star_name', 'TESS':'tmag','Kep':'kepmag',
'TESS Error':'tmage', 'TIC Contamination Ratio':'ratio_contams', 'TOI':'toi', 'V':'vmag', 'V Error':'vmage',
'WISE 12 micron':'w3mag', 'WISE 12 micron Error':'w3mage', 'WISE 22 micron':'w4mag',
'WISE 22 micron Error':'w4mage', 'WISE 3.4 micron':'w1mag', 'WISE 3.4 micron Error':'w1mage',
'WISE 4.6 micron':'w2mag', 'WISE 4.6 micron Error':'w2mage', 'n_TOIs':'n_tois','spec':'spec',
'Campaign':'campaign','Object Type':'objtype'}
#Strips online file for a given epic/tic
if mission.lower() in ['kep','kepler']:
kicinfo=GetKICinfo(icid)
#Checking if the object is also in the TIC:
ticout=Catalogs.query_criteria(catalog="Tic",coordinates=str(kicinfo['ra'])+','+str(kicinfo['dec']),
radius=20*u.arcsecond,objType="STAR",columns=['ID','KIC','Tmag','Vmag']).to_pandas()
if len(ticout.shape)>1:
ticout=ticout.loc[np.argmin(ticout['Tmag'])]
icid=ticout['ID']
mission='tess'
elif ticout.shape[0]>0:
#Not in TIC
return kicinfo
else:
kicinfo = None
assert mission.lower() in ['tess','k2']
outdat={}
outdat['mission']=mission.lower()
#Searching TESS and K2 ExoFop for info (and TIC-8 info):
req=requests.get("https://exofop.ipac.caltech.edu/"+mission.lower()+"/download_target.php?id="+str(icid), timeout=120)
if req.status_code==200:
#Splitting into each 'paragraph'
sections=req.text.split('\n\n')
for sect in sections:
#Processing each section:
if sect[:2]=='RA':
#This is just general info - saving
for line in sect.split('\n'):
if mission.lower()=='tess':
if line[:28].strip() in cols:
outdat[cols[line[:28].strip()]]=line[28:45].split(' ')[0].strip()
else:
outdat[re.sub('\ |\^|\/|\{|\}|\(|\)|\[|\]', '',line[:28])]=line[28:45].split(' ')[0].strip()
elif mission.lower()=='k2':
if line[:13].strip() in cols:
outdat[cols[line[:13].strip()]]=line[13:].strip()
else:
outdat[re.sub('\ |\^|\/|\{|\}|\(|\)|\[|\]', '',line[:13])]=line[13:].strip()
elif sect[:24]=='TESS Objects of Interest':
#Only taking number of TOIs and TOI number:
outdat['n_TOIs']=len(sect.split('\n'))-2
outdat['TOI']=sect.split('\n')[2][:15].strip()
elif sect[:7]=='STELLAR':
#Stellar parameters
labrow=sect.split('\n')[1]
boolarr=np.array([s==' ' for s in labrow])
splits=[0]+list(2+np.where(boolarr[:-3]*boolarr[1:-2]*~boolarr[2:-1]*~boolarr[3:])[0])+[len(labrow)]
labs = [re.sub('\ |\^|\/|\{|\}|\(|\)|\[|\]', '',labrow[splits[i]:splits[i+1]]) for i in range(len(splits)-1)]
spec=[]
if mission.lower()=='tess':
#Going through all sources of Stellar params:
for row in sect.split('\n')[2:]:
stpars=np.array([row[splits[i]:splits[i+1]].strip() for i in range(len(splits)-1)])
for nl in range(len(labs)):
if labs[nl].strip() not in cols:
label=re.sub('\ |\/|\{|\}|\(|\)|\[|\]', '', labs[nl]).replace('Error','_err')
else:
label=cols[labs[nl].strip()]
if not label in outdat.keys() and stpars[1]=='' and stpars[nl].strip()!='':
#Stellar info just comes from TIC, so saving simply:
outdat[label] = stpars[nl]
elif stpars[1]!='' and stpars[nl].strip()!='':
#Stellar info comes from follow-up, so saving with _INSTRUMENT:
spec+=['_'+row[splits[3]:splits[4]].strip()]
outdat[labs[nl]+'_'+stpars[1]] = stpars[nl]
elif mission.lower()=='k2':
for row in sect.split('\n')[1:]:
if row[splits[0]:splits[1]].strip() not in cols:
label=re.sub('\ |\/|\{|\}|\(|\)|\[|\]', '', row[splits[0]:splits[1]]).replace('Error','_err')
else:
label=cols[row[splits[0]:splits[1]].strip()]
if not label in outdat.keys() and row[splits[3]:splits[4]].strip()=='huber':
outdat[label] = row[splits[1]:splits[2]].strip()
outdat[label+'_err'] = row[splits[2]:splits[3]].strip()
elif label in outdat.keys() and row[splits[3]:splits[4]].strip()!='huber':
if row[splits[3]:splits[4]].strip()!='macdougall':
spec+=['_'+row[splits[3]:splits[4]].strip()]
#Adding extra stellar params with _user (no way to tell the source, e.g. spectra)
outdat[label+'_'+row[splits[3]:splits[4]].strip()] = row[splits[1]:splits[2]].strip()
outdat[label+'_err'+'_'+row[splits[3]:splits[4]].strip()] = row[splits[2]:splits[3]].strip()
outdat['spec']=None if len(spec)==0 else ','.join(list(np.unique(spec)))
elif sect[:9]=='MAGNITUDE':
labrow=sect.split('\n')[1]
boolarr=np.array([s==' ' for s in labrow])
splits=[0]+list(2+np.where(boolarr[:-3]*boolarr[1:-2]*~boolarr[2:-1]*~boolarr[3:])[0])+[len(labrow)]
for row in sect.split('\n')[2:]:
if row[splits[0]:splits[1]].strip() not in cols:
label=re.sub('\ |\/|\{|\}|\(|\)|\[|\]', '', row[splits[0]:splits[1]]).replace('Error','_err')
else:
label=cols[row[splits[0]:splits[1]].strip()]
outdat[label] = row[splits[1]:splits[2]].strip()
outdat[label+'_err'] = row[splits[2]:splits[3]].strip()
outdat=pd.Series(outdat,name=icid)
#Replacing err and err1/2 with em and ep
for col in outdat.index:
try:
outdat[col]=float(outdat[col])
except:
pass
if col.find('_err1')!=-1:
outdat=outdat.rename(index={col:col.replace('_err1','ep')})
elif col.find('_err2')!=-1:
outdat=outdat.rename(index={col:col.replace('_err2','em')})
elif col.find('_err')!=-1:
outdat[col.replace('_err','em')]=outdat[col]
outdat[col.replace('_err','ep')]=outdat[col]
outdat=outdat.rename(index={col:col.replace('_err','e')})
for col in outdat.index:
if 'radius' in col:
outdat=outdat.rename(index={col:col.replace('radius','rad')})
if col[-2:]=='em' and col[:-1] not in outdat.index and type(outdat[col])!=str:
#average of em and ep -> e
outdat[col[:-1]]=0.5*(abs(outdat[col])+abs(outdat[col[:-1]+'p']))
return outdat, kicinfo
elif kicinfo is not None:
return None, kicinfo
else:
return None, None
def LoadModel():
#Loading isoclassify "mesa" model from file:
mist_loc='/'.join(classify.__file__.split('/')[:-3])+'/mesa.h5'
import h5py
file = h5py.File(mist_loc,'r+', driver='core', backing_store=False)
model = {'age':np.array(file['age']),\
'mass':np.array(file['mass']),\
'feh':np.array(file['feh']),\
'teff':np.array(file['teff']),\
'logg':np.array(file['logg']),\
'rad':np.array(file['rad']),\
'lum':np.array(file['rad']),\
'rho':np.array(file['rho']),\
'dage':np.array(file['dage']),\
'dmass':np.array(file['dmass']),\
'dfeh':np.array(file['dfeh']),\
'eep':np.array(file['eep']),\
'bmag':np.array(file['bmag']),\
'vmag':np.array(file['vmag']),\
'btmag':np.array(file['btmag']),\
'vtmag':np.array(file['vtmag']),\
'gmag':np.array(file['gmag']),\
'rmag':np.array(file['rmag']),\
'imag':np.array(file['imag']),\
'zmag':np.array(file['zmag']),\
'jmag':np.array(file['jmag']),\
'hmag':np.array(file['hmag']),\
'kmag':np.array(file['kmag']),\
'd51mag':np.array(file['d51mag']),\
'gamag':np.array(file['gamag']),\
'fdnu':np.array(file['fdnu']),\
'avs':np.zeros(len(np.array(file['gamag']))),\
'dis':np.zeros(len(np.array(file['gamag'])))}
model['rho'] = np.log10(model['rho'])
model['lum'] = model['rad']**2*(model['teff']/5777.)**4
# next line turns off Dnu scaling relation corrections
model['fdnu'][:]=1.
model['avs']=np.zeros(len(model['teff']))
model['dis']=np.zeros(len(model['teff']))
'''
# load MIST models
homedir=os.path.expanduser('~/')
import pickle
model=pickle.load(open(mist_loc,'rb'),encoding='latin')
# prelims to manipulate some model variables (to be automated soon ...)
model['rho']=np.log10(model['rho'])
model['fdnu'][:]=1.
model['avs']=np.zeros(len(model['teff']))
model['dis']=np.zeros(len(model['teff']))
'''
return model
def LoadDust(sc,plx,dust='allsky'):
import mwdust
av=mwdust.SFD()(sc.galactic.l.deg,sc.galactic.b.deg,1000.0/plx)
#sfdmap(sc.ra.deg.to_string(),sc.dec.deg.to_string())
if dust == 'allsky':
dustmodel = pipeline.query_dustmodel_coords_allsky(sc.ra.deg,sc.dec.deg)
ext = pipeline.extinction('cardelli')
if dust == 'green18':
dustmodel = pipeline.query_dustmodel_coords(sc.ra.deg,sc.dec.deg)
ext = pipeline.extinction('schlafly16')
if dust == 'none':
dustmodel = 0
ext = pipeline.extinction('cardelli')
return dustmodel,ext
def dens2(logg,loggerr1,loggerr2,rad,raderr1,raderr2,mass,masserr1,masserr2,nd=6000,returnpost=False):
#Returns a stellar density computed both from logg/rad and from mass/rad, keeping the tighter of the two posteriors
dens1 = lambda logg,rad: (np.power(10,logg-2)/(1.33333*np.pi*6.67e-11*rad*695500000))/1410.0
dens2 = lambda mass,rad: ((mass*1.96e30)/(4/3.0*np.pi*(rad*695500000)**3))/1410.0
loggs= np.random.normal(logg,0.5*(loggerr1+loggerr2),nd)
rads= np.random.normal(rad,0.5*(raderr1+raderr2),nd)
rads[rads<0]=abs(np.random.normal(rad,0.25*(raderr1+raderr2),np.sum(rads<0)))
masses= np.random.normal(mass,0.5*(masserr1+masserr2),nd)
masses[masses<0]=abs(np.random.normal(mass,0.25*(masserr1+masserr2),np.sum(masses<0)))
d1=np.array([dens1(loggs[l],rads[l]) for l in range(nd)])
d2=np.array([dens2(masses[m],rads[m]) for m in range(nd)])
#Combining up/down dists alone for up.down uncertainties. COmbining all dists for median.
#Gives almost identical values as a weighted average of resulting medians/errors.
#print("logg/rad: "+str(np.median(d1))+"+/-"+str(np.std(d1))+", mass/rad:"+str(np.median(d2))+"+/-"+str(np.std(d2)))
post=d1 if np.std(d1)<np.std(d2) else d2
if returnpost:
#Returning combined posterier...
return post
else:
dens=np.percentile(post,[16,50,84])
return np.array([dens[1],np.diff(dens)[0],np.diff(dens)[1]])
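#Illustrative call with made-up solar-like inputs (not from any catalogue):
# dens2(logg=4.44, loggerr1=0.05, loggerr2=0.05,
#       rad=1.0, raderr1=0.03, raderr2=0.03,
#       mass=1.0, masserr1=0.05, masserr2=0.05)
#returns roughly [1.0, err_down, err_up], i.e. the density in units of the
#solar mean density (~1410 kg m^-3), which is what the /1410.0 above normalises to.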
def QueryNearbyGaia(sc,CONESIZE,file=None):
job = Gaia.launch_job_async("SELECT * \
FROM gaiadr2.gaia_source \
WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec),\
CIRCLE('ICRS',"+str(sc.ra.deg)+","+str(sc.dec.deg)+","+str(CONESIZE/3600.0)+"))=1;" \
, dump_to_file=True,output_file=file)
df=job.get_results().to_pandas()
'''
if df:
job = Gaia.launch_job_async("SELECT * \
FROM gaiadr1.gaia_source \
WHERE CONTAINS(POINT('ICRS',gaiadr1.gaia_source.ra,gaiadr1.gaia_source.dec),\
CIRCLE('ICRS',"+str(sc.ra.deg)+","+str(sc.dec.deg)+","+str(CONESIZE/3600.0)+"))=1;" \
, dump_to_file=True,output_file=file)
'''
print(np.shape(df))
if np.shape(df)[0]>1:
print(df.shape[0],"stars with mags:",df.phot_g_mean_mag.values,'and teffs:',df.teff_val.values)
#Taking brightest star as target
df=df.loc[np.argmin(df.phot_g_mean_mag)]
if len(np.shape(df))>1:
df=df.iloc[0]
if np.shape(df)[0]!=0 or np.isnan(float(df['teff_val'])):
outdf={}
#print(df[['teff_val','teff_percentile_upper','radius_val','radius_percentile_upper','lum_val','lum_percentile_upper']])
outdf['Teff']=float(df['teff_val'])
outdf['e_Teff']=0.5*(float(df['teff_percentile_upper'])-float(df['teff_percentile_lower']))
#print(np.shape(df))
#print(df['lum_val'])
if not np.isnan(df['lum_val']):
outdf['lum']=float(df['lum_val'])
outdf['e_lum']=0.5*(float(df['lum_percentile_upper'])-float(df['lum_percentile_lower']))
else:
if outdf['Teff']<9000:
outdf['lum']=np.power(10,5.6*np.log10(outdf['Teff']/5880))
outdf['e_lum']=1.0
else:
outdf['lum']=np.power(10,8.9*np.log10(outdf['Teff']/5880))
outdf['e_lum']=0.3*outdf['lum']
if not np.isnan(df['radius_val']):
outdf['rad']=float(df['radius_val'])
outdf['e_rad']=0.5*(float(df['radius_percentile_upper'])-float(df['radius_percentile_lower']))
else:
mass=outdf['lum']**(1/3.5)
if outdf['Teff']<9000:
outdf['rad']=mass**(3/7.)
outdf['e_rad']=0.5*outdf['rad']
else:
outdf['rad']=mass**(19/23.)
outdf['e_rad']=0.5*outdf['rad']
outdf['GAIAmag_api']=df['phot_g_mean_mag']
else:
print("NO GAIA TARGET FOUND")
outdf={}
return outdf
def CheckSpecCsv(radec,icid,thresh=20*u.arcsec):
specs=pd.read_csv(stellar_path+"/spectra_all.csv")
spec_coords=SkyCoord(specs['ra']*u.deg,specs['dec']*u.deg)
seps=radec.separation(spec_coords)
#Searching by ID
if icid in specs.input_id.values:
out=specs.loc[specs.input_id.values==icid,['teff','teff_err','logg','logg_err','feh','feh_err']]
elif np.min(seps)<thresh:
#And searching by RA/DEC
out=specs.iloc[np.argmin(seps)][['teff','teff_err','logg','logg_err','feh','feh_err']]
else:
return None
#Converting from df to Series:
out=out.iloc[0] if type(out)==pd.DataFrame else out
return out
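#CheckSpecCsv assumes stellar_path+"/spectra_all.csv" provides at least the
#columns input_id, ra, dec, teff, teff_err, logg, logg_err, feh and feh_err
#(inferred from the lookups above).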
def Assemble_and_run_isoclassify(icid,sc,mission,survey_dat,exofop_dat,errboost=0.2,spec_dat=None,
useGaiaLum=True,useGaiaBR=True,useBV=True,useGaiaSpec=True,
use2mass=True,useGriz=True,useGaiaAg=True):
############################################
# Building isoclassify input data: #
############################################
x=classify.obsdata()
mag=False
x.addcoords(sc.ra.deg,sc.dec.deg)
#Luminosity from Gaia:
if useGaiaLum and 'lum_val' in survey_dat.index:
if not np.isnan((survey_dat.lum_val+survey_dat.lum_percentile_upper+survey_dat.lum_percentile_lower)):
x.addlum([survey_dat.lum_val],[0.5*(survey_dat.lum_percentile_upper-survey_dat.lum_percentile_lower)])
#BR from Gaia:
if useGaiaBR and 'phot_g_mean_mag' in survey_dat.index and survey_dat.phot_g_mean_mag is not None:
if not np.isnan(np.sum(survey_dat[['phot_g_mean_mag','phot_bp_mean_mag','phot_rp_mean_mag',
'phot_g_mean_flux_over_error','phot_bp_mean_flux_over_error',
'phot_rp_mean_flux_over_error']].values.astype(np.float64))):
x.addgaia(survey_dat[['phot_g_mean_mag','phot_bp_mean_mag','phot_rp_mean_mag']].values.astype(np.float64),
errboost+np.log(1.0+1.0/survey_dat[['phot_g_mean_flux_over_error',
'phot_bp_mean_flux_over_error',
'phot_rp_mean_flux_over_error']].values.astype(np.float64))*2.5)
mag+=True
else:
print("No Gaia mag for",icid)
mag+=False
#BV from either APASS or Exofop:
if useBV and 'ap_Johnson B (B)' in survey_dat.index and not np.isnan(np.sum(survey_dat[['ap_Johnson B (B)','ap_Johnson V (V)','ap_Berr','ap_Verr']].values)):
#BV photometry (eg apass)
x.addbv([survey_dat['ap_Johnson B (B)'],survey_dat['ap_Johnson V (V)']],
[errboost+survey_dat['ap_Berr'],errboost+survey_dat['ap_Verr']])
mag+=True
elif useBV and 'B' in exofop_dat.index and not np.isnan(np.sum(exofop_dat[['B','V','Be','Ve']].values.astype(float))):
x.addbv([float(exofop_dat['B']),float(exofop_dat['V'])],
[errboost+float(exofop_dat['Be']),errboost+float(exofop_dat['Ve'])])
mag+=True
else:
print("No BV for",icid)
mag+=False
#Spectra either from APASS, or from user-uploaded file, or from Gaia spectrum:
if exofop_dat['spec'] is not None:
#From ExoFop - either has _user (K2) or _INSTRUMENT (TESS)
#If there's multiple spectra, we'll take the one with lowest Teff err:
if len(exofop_dat['spec'].split(','))>1:
src=exofop_dat['spec'].split(',')[np.argmin([spec_dat['teff_err'+spec_src] for spec_src in exofop_dat['spec'].split(',')])]
else:
src=exofop_dat['spec'].split(',')[0]
if 'logg'+src in spec_dat.index:
#Correcting possible problems (e.g. missing columns):
exofop_dat['feh'+src]=0.0 if 'feh'+src not in exofop_dat.index else exofop_dat['feh'+src]
if 'fehe'+src not in exofop_dat.index:
if 'fehep'+src in exofop_dat.index:
exofop_dat['fehe'+src]=0.5*(abs(exofop_dat['fehep'+src])+abs(exofop_dat['fehem'+src]))
else:
exofop_dat['fehe'+src]=2.0
if 'logge'+src not in exofop_dat.index:
if 'loggep'+src in exofop_dat.index:
exofop_dat['logge'+src]=0.5*(abs(exofop_dat['loggep'+src])+abs(exofop_dat['loggem'+src]))
else:
exofop_dat['logge'+src]=2.5
if 'teffe'+src not in exofop_dat.index:
if 'teffep'+src in exofop_dat.index:
exofop_dat['teffe'+src]=0.5*(abs(exofop_dat['teffep'+src])+abs(exofop_dat['teffem'+src]))
else:
exofop_dat['teffe'+src]=250
x.addspec([exofop_dat['teff'+src], exofop_dat['logg'+src], exofop_dat['feh'+src]],
[exofop_dat['teffe'+src], exofop_dat['logge'+src], exofop_dat['fehe'+src]])
elif spec_dat is not None:
#From LAMOST or AAT or Coralie (cross-matched list in stellar folder)
x.addspec([spec_dat.teff, spec_dat.logg, spec_dat.feh],
[spec_dat.teff_err, spec_dat.logg_err, spec_dat.feh_err])
elif useGaiaSpec and 'teff_val' in survey_dat.index and not np.isnan(survey_dat.teff_val):
#From Gaia:
x.addspec([survey_dat.teff_val, survey_dat.rv_template_logg, 0.0],
[0.5*(survey_dat.teff_percentile_upper-survey_dat.teff_percentile_lower), 0.4, 1.0])
#2MASS JHK from Gaia-xmatched catalogue or from ExoFop:
if use2mass and '2m_ks_m' in survey_dat.index and not np.isnan(np.sum(survey_dat[['2m_j_m','2m_h_m','2m_ks_m',
'2m_j_msigcom','2m_h_msigcom','2m_ks_msigcom']].values)):
# 2MASS photometry
x.addjhk([survey_dat['2m_j_m'],survey_dat['2m_h_m'],survey_dat['2m_ks_m']],
[errboost+survey_dat['2m_j_msigcom'],errboost+survey_dat['2m_h_msigcom'],errboost+survey_dat['2m_ks_msigcom']])
mag+=True
elif use2mass and 'K' in exofop_dat.index and not np.isnan(np.sum(exofop_dat[['J','H','K',
'Je','He','Ke']].values.astype(float))):
x.addjhk([float(exofop_dat['J']),float(exofop_dat['H']),float(exofop_dat['K'])],
[errboost+float(exofop_dat['Je']),errboost+float(exofop_dat['He']),errboost+float(exofop_dat['Ke'])])
mag+=True
else:
print("No 2MASS for",icid)
mag+=False
#GRIZ photometry from APASS or Gaia-xmatched SDSS catalogue or from ExoFop:
if useGriz and "ap_Sloan z' (SZ)" in survey_dat.index and not np.isnan(np.sum(survey_dat[["ap_Sloan g' (SG)","ap_Sloan r' (SR)","ap_Sloan i' (SI)","ap_Sloan z' (SZ)","ap_SGerr","ap_SRerr","ap_SIerr","ap_SZerr"]].values)):
# 2MASS photometry
x.addgriz([survey_dat["ap_Sloan g' (SG)"],survey_dat["ap_Sloan r' (SR)"],
survey_dat["ap_Sloan i' (SI)"],survey_dat["ap_Sloan z' (SZ)"]],
[errboost+survey_dat["ap_SGerr"],errboost+survey_dat["ap_SRerr"],
errboost+survey_dat["ap_SIerr"],errboost+survey_dat["ap_SZerr"]])
mag+=True
elif useGriz and 'sd_z_mag' in survey_dat.index and not np.isnan(np.sum(survey_dat[["sd_g_mag","sd_r_mag","sd_i_mag","sd_z_mag","sd_g_mag_error","sd_r_mag_error","sd_i_mag_error","sd_z_mag_error"]].values)):
x.addgriz([survey_dat['sd_g_mag'],survey_dat['sd_r_mag'],survey_dat['sd_i_mag'],survey_dat['sd_z_mag']],
[errboost+survey_dat['sd_g_mag_error'],errboost+survey_dat['sd_r_mag_error'],
errboost+survey_dat['sd_i_mag_error'],errboost+survey_dat['sd_z_mag_error']])
mag+=True
elif useGriz and 'z' in exofop_dat.index and not np.isnan(np.sum(exofop_dat[["g","r","i","z","ge","re","ie","ze"]].values)):
x.addgriz([float(exofop_dat['g']),float(exofop_dat['r']),float(exofop_dat['i']),float(exofop_dat['z'])],
[errboost+float(exofop_dat['ge']),errboost+float(exofop_dat['re']),
errboost+float(exofop_dat['ie']),errboost+float(exofop_dat['ze'])])
mag+=True
else:
mag+=False
print("No griz for",icid)
#Gaia Ag
if useGaiaAg and 'a_g_val' in survey_dat.index and survey_dat['a_g_val'] is not None:
av=survey_dat['a_g_val']
else:
av=-99
#Gaia Parallax:
if 'parallax' in survey_dat.index and survey_dat.parallax is not None:
x.addplx(survey_dat.parallax/1000,survey_dat.parallax_error/1000)
#In a case where no magnitude is set, we assume V~kepmag/V~Tmag:
if not mag:
if 'tmag' in exofop_dat.index and ~np.isnan(exofop_dat['tmag']):
print("No archival photometry! Adding Tmag from input catalogue magnitude as V:",exofop_dat['tmag'])
x.addbv([-99,exofop_dat['tmag']],[-99,0.2])
elif 'kepmag' in exofop_dat.index and ~np.isnan(exofop_dat['kepmag']):
print("No archival photometry! Adding Kepmaf from input catalogue magnitude as V:",exofop_dat['kepmag'])
x.addbv([-99,exofop_dat['kepmag']],[-99,0.2])
############################################
# Running isoclassify: #
############################################
mod=LoadModel()
dustmodel,ext = LoadDust(sc,survey_dat.parallax/1000.,dust='allsky')
paras = classify.classify(input=x, model=mod, dustmodel=dustmodel, useav=av, ext=ext, plot=0)
############################################
# Assembling all output data: #
############################################
#Extracting parameters from isoclassify output class into pandas df:
col_names=['teff','teffep','teffem','logg','loggep','loggem','feh','fehep','fehem',
'rad','radep','radem','mass','massep','massem','rho','rhoep','rhoem',
'lum','lumep','lumem','avs','avsep','avsem','dis','disep','disem']#,'plx','plxep','plxem','mabs']
isoclass_df=pd.Series({c: getattr(paras, c) for c in col_names})
#isoclass_df=isoclass_df.rename(index={'rho':'rho_gcm3','rhoep':'rho_gcm3ep','rhoem':'rho_gcm3em'})
#After much posturing, I have determined that these output "rho"s are in rho_S and not gcm3, so adding gcm3 values here:
isoclass_df['rho_gcm3']=isoclass_df['rho']*1.411
isoclass_df['rho_gcm3ep']=isoclass_df['rhoep']*1.411
isoclass_df['rho_gcm3em']=isoclass_df['rhoem']*1.411
return isoclass_df, paras
def starpars(icid,mission,errboost=0.1,return_best=True,
useGaiaLum=True,useGaiaBR=True,useGaiaSpec=True,
useBV=True,use2mass=True,useGriz=True,useGaiaAg=True):
# Estimating stellar parameters given survey data, input catalogues, and possibly follow-up data
#INPUTS:
# - icid (Mission ID in input catalogue)
# - mission ('tess','k2','kepler')
# - errboost (amount to artificially boost photometry errors due to possible systematics)
# - return_best (boolean. Only return best info, or return all data objects?)
# - useGaiaLum (Use luminosity as determined by Gaia)
# - useGaiaBR (Use Gaia B and R filter photometry)
# - useGaiaSpec (Use the Gaia estimates of logg and Teff as input spectra)
# - useBV (Use BV from survey data - e.g. APASS)
# - use2mass (Use 2MASS JHK from survey)
# - useGriz (Use BV from survey data - e.g. APASS or SDSS)
# - useGaiaAg (Use Reddening as determined by Gaia)
############################################
# Getting stellar data from Exofop: #
############################################
exofop_dat, kicinfo = GetExoFop(icid,mission)
#In the case where we only get KIC info, we just call this "ExoFop" too:
if exofop_dat is None and kicinfo is not None:
exofop_dat=kicinfo
exofop_dat['mission']='kep_or_k2'
############################################
# Getting survey data from [Various]: #
############################################
#Loading RA and Dec:
if type(exofop_dat['ra'])==str and (exofop_dat['ra'].find(':')!=-1)|(exofop_dat['ra'].find('h')!=-1):
coor=SkyCoord(exofop_dat['ra'],exofop_dat['dec'],unit=(u.hourangle,u.deg))
elif (type(exofop_dat['ra'])==float)|(type(exofop_dat['ra'])==np.float64) or (type(exofop_dat['ra'])==str)&(exofop_dat['ra'].find(',')!=-1):
coor=SkyCoord(exofop_dat['ra'],exofop_dat['dec'],unit=u.deg)
#Getting TIC, Spec and survey data:
#tic_dat = Catalogs.query_criteria("TIC",coordinates=coor,radius=20/3600,objType="STAR")#This is not used, as TIC is on ExoFop
spec_dat = CheckSpecCsv(coor,icid)
survey_dat=QueryGaiaAndSurveys(coor,mission=mission)
order_of_kw_to_remove=['useGaiaAg','useGriz','useBV','useGaiaBR','use2mass','useGaiaSpec','useGaiaLum']
n_kw_to_remove=0
isoclass_dens_is_NaN=True
#Isoclass often fails, e.g. due to photometry. So let's loop through the kwargs and gradually remove contraints:
while isoclass_dens_is_NaN and n_kw_to_remove<=len(order_of_kw_to_remove):
kwars={order_of_kw_to_remove[nkw]:(True if nkw>=n_kw_to_remove else False) for nkw in range(len(order_of_kw_to_remove))}
#print(n_kw_to_remove,'/',len(order_of_kw_to_remove),kwars)
try:
isoclass_df, paras = Assemble_and_run_isoclassify(icid,coor,mission,survey_dat,exofop_dat,
errboost=errboost*(1+0.33*n_kw_to_remove),spec_dat=spec_dat,**kwars)
#print(isoclass_df[['rho_gcm3','rho_gcm3ep','rho_gcm3em']])
isoclass_dens_is_NaN=(np.isnan(isoclass_df[['rho_gcm3','rho_gcm3ep','rho_gcm3em']]).any())|(isoclass_df[['rho_gcm3','rho_gcm3ep','rho_gcm3em']]==0.0).all()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, exc_tb.tb_lineno)
isoclass_df,paras=None,None
#print(n_kw_to_remove,'|',isoclass_df)
n_kw_to_remove+=1
#Assessing which available data source is the *best* using lowest density error
if isoclass_df is not None:
isoclass_err_rho=(0.5*(abs(isoclass_df['rho_gcm3ep'])+abs(isoclass_df['rho_gcm3em'])))/isoclass_df['rho_gcm3']
else:
isoclass_err_rho=100
    #Calculating a density for the input catalogue if it does not already provide one:
if 'rho' not in exofop_dat.index and 'rho_gcm3' not in exofop_dat.index and 'rad' in exofop_dat.index and (('mass' in exofop_dat.index)|('logg' in exofop_dat.index)):
#Getting Density from R, M and logg:
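        # Mass route: rho/rho_sun = (M/M_sun)/(R/R_sun)**3, scaled by ~1.411 g/cm^3
        # (the mean solar density) so the result is in g/cm^3 for the rho_gcm3 columns.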
rhos=[];rhoems=[];rhoeps=[];rhoes=[]
if 'mass' in exofop_dat.index:
rhos+=[1.411*exofop_dat['mass']/exofop_dat['rad']**3]
rhoeps+=[1.411*(exofop_dat['mass']+exofop_dat['massep'])/((exofop_dat['rad']-exofop_dat['radem'])**3)-rhos[-1]]
rhoems+=[rhos[-1] - 1.411*(exofop_dat['mass']-exofop_dat['massem'])/((exofop_dat['rad']+exofop_dat['radep'])**3)]
rhoes+=[0.5*(abs(rhoeps[-1])+abs(rhoems[-1]))]
if 'logg' in exofop_dat.index:
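            # Surface-gravity route: with log g_sun ~ 4.4377 (cgs), 10**(logg-4.4377) equals
            # (M/M_sun)/(R/R_sun)**2, so dividing by (R/R_sun) gives the density in solar
            # units; the 1.411 g/cm^3 mean solar density puts it on the same g/cm^3 scale
            # as the mass-based estimate above.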
            rhos+=[1.411*np.power(10,exofop_dat['logg']-4.4377)/exofop_dat['rad']]
            rhoeps+=[1.411*np.power(10,(exofop_dat['logg']+exofop_dat['loggep'])-4.4377)/(exofop_dat['rad']-exofop_dat['radem'])-rhos[-1]]
            rhoems+=[rhos[-1]-1.411*np.power(10,(exofop_dat['logg']-exofop_dat['loggem'])-4.4377)/(exofop_dat['rad']+exofop_dat['radep'])]
            rhoes+=[0.5*(abs(rhoeps[-1])+abs(rhoems[-1]))]
rhos=np.array(rhos)
rhoes=np.array(rhoes)
exofop_dat['rho_gcm3']=rhos[np.argmin(rhoes)]
exofop_dat['rho_gcm3e']=np.min(rhoes)
exofop_dat['rho_gcm3em']=rhoems[np.argmin(rhoes)]
        exofop_dat['rho_gcm3ep']=rhoeps[np.argmin(rhoes)]
exofop_dat['rho']=exofop_dat['rho_gcm3']/1.411
exofop_dat['rhoe']=exofop_dat['rho_gcm3e']/1.411
exofop_dat['rhoem']=exofop_dat['rho_gcm3em']/1.411
exofop_dat['rhoep']=exofop_dat['rho_gcm3ep']/1.411
elif 'rho' in exofop_dat.index and 'rho_gcm3' not in exofop_dat.index:
exofop_dat['rho_gcm3']=exofop_dat['rho']*1.411
exofop_dat['rho_gcm3e']=0.5*(abs(exofop_dat['rhoep'])+abs(exofop_dat['rhoem']))*1.411
elif 'rho_gcm3' in exofop_dat.index and 'rho' not in exofop_dat.index:
exofop_dat['rho']=exofop_dat['rho_gcm3']/1.411
exofop_dat['rhoe']=0.5*(abs(exofop_dat['rho_gcm3ep'])+abs(exofop_dat['rho_gcm3em']))/1.411
exofop_dat['rhoep']=exofop_dat['rho_gcm3ep']/1.411
exofop_dat['rhoem']=exofop_dat['rho_gcm3em']/1.411
elif 'rho_gcm3' not in exofop_dat.index:
exofop_dat['rho_gcm3e']=100
exofop_dat['rho_gcm3']=1
if 'rho_gcm3em' in exofop_dat.index and 'rho_gcm3e' not in exofop_dat.index:
exofop_dat['rho_gcm3e']=0.5*(abs(exofop_dat['rho_gcm3ep'])+abs(exofop_dat['rho_gcm3em']))
#elif
#exofop_dat['rho_gcm3e']=0.5*(exofop_dat['rho_gcm3em']+exofop_dat['rho_gcm3ep'])
#Calculating percentage error on density from exofop/input catalogues:
if 'rho_gcm3' in exofop_dat.index and not np.isnan(float(exofop_dat['rho_gcm3'])):
#Checking if there is also a Kepler Input catalogue file, and whether the quoted density error is lower:
if kicinfo is not None and 'rho' in kicinfo.index:
if (kicinfo['rho_gcm3e']/kicinfo['rho_gcm3'])<(exofop_dat['rho_gcm3e']/exofop_dat['rho_gcm3']):
#Replacing data in exofop_dat with that from kicdat
for col in kicinfo.index:
exofop_dat[col]=kicinfo[col]
exofop_dat['source']='KIC'
inputcat_err_rho=(exofop_dat['rho_gcm3e'])/exofop_dat['rho_gcm3']
else:
inputcat_err_rho=100
print(inputcat_err_rho,exofop_dat['rho_gcm3e'],'<err | rho>',exofop_dat['rho_gcm3'])
print('Density errors. isoclassify:',isoclass_err_rho,', input cat:',inputcat_err_rho)
if isoclass_df is not None and abs(exofop_dat['rho_gcm3']-isoclass_df['rho_gcm3']) > abs(0.5*(abs(isoclass_df['rho_gcm3ep'])+abs(isoclass_df['rho_gcm3em']))+exofop_dat['rho_gcm3e']):
print('Densities disagree at >1-sigma | isoclassify:',isoclass_df['rho_gcm3'],0.5*(abs(isoclass_df['rho_gcm3ep'])+abs(isoclass_df['rho_gcm3em'])),'| input cat:',exofop_dat['rho_gcm3'],exofop_dat['rho_gcm3e'])
#Now we know which is best, we put that best info into "best_df"
best_df=pd.Series()
if mission[0] in ['T','t']:
best_df['tmag']=exofop_dat['tmag']
elif mission[0] in ['K','k']:
best_df['kepmag']=exofop_dat['kepmag']
best_df['ra']=exofop_dat['ra']
best_df['dec']=exofop_dat['dec']
#selecting the best stellar parameter source from input cat vs isoclassify
if isoclass_err_rho<inputcat_err_rho or np.isnan(inputcat_err_rho):
#Generated Density better constrained by isoclassify:
col_names=['teff','teffep','teffem','logg','loggep','loggem','lum','lumep','lumem',
'rad','radep','radem','mass','massep','massem','rho_gcm3','rho_gcm3ep','rho_gcm3em',
'dis','disep','disem']
for col in col_names:
best_df[col]=isoclass_df[col]
best_df['source']='isoclassify'
elif inputcat_err_rho<=isoclass_err_rho or np.isnan(isoclass_err_rho):
#input catalogue info better constrained
col_names=['teff','teffep','teffem','teffe','teff_prov','logg','loggep','loggem','logge','logg_prov',
                   'rad','radep','radem','rade','mass','massep','massem','masse',
'rho_gcm3','rho_gcm3e','rho_gcm3ep','rho_gcm3em','rho','rhoe','rhoep','rhoem']
if 'av' in exofop_dat.index:
col_names+=['avs','avsem','avsep']
if 'feh' in exofop_dat.index:
col_names+=['feh','fehem','fehep']
for col in col_names:
if col in exofop_dat.index:
best_df[col]=exofop_dat[col]
best_df['source']='input_catalogue'
#Converting rho in gcm3 to rho in rho_s
if 'rho_gcm3' in best_df.index:
coldic={'rho_gcm3':'rho','rho_gcm3em':'rhoem','rho_gcm3ep':'rhoep'}
for key in coldic:
best_df[coldic[key]]=best_df[key]/1.411
if return_best:
return best_df
else:
return exofop_dat,survey_dat,isoclass_df,paras,best_df
def getStellarDensity(ID,mission,errboost=0.1):
#Compiling dfs (which may have spectra)
    exofop_dat,_,isoclass_df,_,_=starpars(ID,mission,errboost=errboost,return_best=False)
#Sorting out missing data and getting important info - Mass, Radius, density and logg:
if | pd.isnull(exofop_dat[['logg','mass']]) | pandas.isnull |
import argparse
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
import cooler
import numpy as np
import pandas as pd
from hicmatrix import HiCMatrix
from hicmatrix.lib import MatrixFileHandler
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
description=''
)
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--matrix', '-m',
help='The single cell Hi-C interaction matrices to investigate for QC. Needs to be in scool format',
metavar='scool scHi-C matrix',
required=True)
parserRequired.add_argument('--outFileName', '-o',
help='File name of the normalized scool matrix.',
required=True)
parserRequired.add_argument('--threads', '-t',
help='Number of threads. Using the python multiprocessing module.',
required=False,
default=4,
type=int)
parserRequired.add_argument('--normalize', '-n',
                                help='Normalize a) all matrices to the lowest read count of the given matrices, b) all matrices to a given read coverage value, or c) all matrices by a multiplicative value',
choices=['smallest', 'read_count', 'multiplicative'],
default='smallest',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--setToZeroThreshold', '-z',
                           help='Values smaller than this threshold are set to 0.',
required=False,
default=1.0,
type=float)
parserOpt.add_argument('--value', '-v', default=1,
type=float,
help='This value is used to either be interpreted as the desired read_count or the multiplicative value. This depends on the value for --normalize')
parserOpt.add_argument('--maximumRegionToConsider',
help='To compute the normalization factor for the normalization mode \'smallest\' and \'read_count\', consider only this genomic distance around the diagonal.',
required=False,
default=30000000,
type=int)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def compute_sum(pMatrixName, pMatricesList, pMaxDistance, pQueue):
sum_list = []
for i, matrix in enumerate(pMatricesList):
matrixFileHandler = MatrixFileHandler(pFileType='cool', pMatrixFile=pMatrixName + '::' + matrix, pLoadMatrixOnly=True)
_matrix, cut_intervals, nan_bins, \
distance_counts, correction_factors = matrixFileHandler.load()
instances = _matrix[0]
features = _matrix[1]
distances = np.absolute(instances - features)
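        # instances/features are the bin indices of the non-zero pixels; only pixels within
        # pMaxDistance of the diagonal contribute to the per-cell read-count sum.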
mask = distances <= pMaxDistance
sum_of_matrix = _matrix[2][mask].sum()
sum_list.append(sum_of_matrix)
del _matrix
pQueue.put(sum_list)
def compute_normalize(pMatrixName, pMatricesList, pNormalizeMax, pSumOfAll, pThreshold, pMultiplicative, pQueue):
pixelList = []
for i, matrix in enumerate(pMatricesList):
matrixFileHandler = MatrixFileHandler(pFileType='cool', pMatrixFile=pMatrixName + '::' + matrix, pLoadMatrixOnly=True)
_matrix, cut_intervals, nan_bins, \
distance_counts, correction_factors = matrixFileHandler.load()
data = np.array(_matrix[2]).astype(np.float32)
instances = np.array(_matrix[0])
features = np.array(_matrix[1])
mask = np.isnan(data)
data[mask] = 0
mask = np.isinf(data)
data[mask] = 0
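        # 'smallest'/'read_count' modes: divide each pixel by this cell's total over the
        # target total, rescaling the cell to pNormalizeMax reads; 'multiplicative' mode:
        # simply scale every pixel by the supplied value.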
if pMultiplicative is None:
adjust_factor = pSumOfAll[i] / pNormalizeMax
else:
adjust_factor = pMultiplicative
if pMultiplicative is None:
data /= adjust_factor
else:
data *= adjust_factor
mask = np.isnan(data)
data[mask] = 0
mask = np.isinf(data)
data[mask] = 0
mask = data < pThreshold
data[mask] = 0
mask = data == 0
instances = instances[~mask]
features = features[~mask]
data = data[~mask]
pixels = | pd.DataFrame({'bin1_id': instances, 'bin2_id': features, 'count': data}) | pandas.DataFrame |
"""
Computational Cancer Analysis Library
Authors:
Huwate (Kwat) Yeerna (Medetgul-Ernar)
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
<NAME>
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
"""
from os.path import isfile
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.cm import bwr, gist_rainbow
from matplotlib.colorbar import ColorbarBase, make_axes
from matplotlib.colors import (ColorConverter, LinearSegmentedColormap,
ListedColormap, Normalize)
from matplotlib.gridspec import GridSpec
from matplotlib.pyplot import (figure, gca, savefig, sca, subplot, suptitle,
tight_layout)
from numpy import array, unique
from pandas import DataFrame, Series, isnull
from seaborn import (barplot, boxplot, clustermap, despine, distplot, heatmap,
set_style, violinplot)
from .d2 import get_dendrogram_leaf_indices, normalize_2d_or_1d
from .file import establish_filepath
# ==============================================================================
# Style
# ==============================================================================
FIGURE_SIZE = (16, 10)
SPACING = 0.05
FONT_LARGEST = {'fontsize': 24, 'weight': 'bold', 'color': '#220530'}
FONT_LARGER = {'fontsize': 20, 'weight': 'bold', 'color': '#220530'}
FONT_STANDARD = {'fontsize': 16, 'weight': 'bold', 'color': '#220530'}
FONT_SMALLER = {'fontsize': 12, 'weight': 'bold', 'color': '#220530'}
# Color maps
C_BAD = 'wheat'
# Continuous 1
CMAP_CONTINUOUS = bwr
CMAP_CONTINUOUS.set_bad(C_BAD)
# Continuous 2
reds = [0.26, 0.26, 0.26, 0.39, 0.69, 1, 1, 1, 1, 1, 1]
greens_half = [0.26, 0.16, 0.09, 0.26, 0.69]
colordict = {
'red':
tuple([(0.1 * i, r, r) for i, r in enumerate(reds)]),
'green':
tuple([
(0.1 * i, r, r)
for i, r in enumerate(greens_half + [1] + list(reversed(greens_half)))
]),
'blue':
tuple([(0.1 * i, r, r) for i, r in enumerate(reversed(reds))])
}
CMAP_CONTINUOUS_ASSOCIATION = LinearSegmentedColormap('association', colordict)
CMAP_CONTINUOUS_ASSOCIATION.set_bad(C_BAD)
# Categorical
CMAP_CATEGORICAL = gist_rainbow
CMAP_CATEGORICAL.set_bad(C_BAD)
# Binary
CMAP_BINARY = ListedColormap(['#cdcdcd', '#404040'])
CMAP_BINARY.set_bad(C_BAD)
DPI = 300
# ==============================================================================
# Functions
# ==============================================================================
def plot_points(*args,
title='',
xlabel='',
ylabel='',
filepath=None,
file_extension='pdf',
dpi=DPI,
ax=None,
**kwargs):
"""
:param args:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
ax = gca()
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ''
if 'marker' not in kwargs:
kwargs['marker'] = '.'
ax.plot(*args, **kwargs)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_distribution(a,
bins=None,
hist=True,
kde=True,
rug=False,
fit=None,
hist_kws=None,
kde_kws=None,
rug_kws=None,
fit_kws=None,
color=None,
vertical=False,
norm_hist=False,
axlabel=None,
label=None,
ax=None,
title='',
xlabel='',
ylabel='Frequency',
filepath=None,
file_extension='pdf',
dpi=DPI):
"""
:param a:
:param bins:
:param hist:
:param kde:
:param rug:
:param fit:
:param hist_kws:
:param kde_kws:
:param rug_kws:
:param fit_kws:
:param color:
:param vertical:
:param norm_hist:
:param axlabel:
:param label:
:param ax:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
distplot(
a,
bins=bins,
hist=hist,
kde=kde,
rug=rug,
fit=fit,
hist_kws=hist_kws,
kde_kws=kde_kws,
rug_kws=rug_kws,
fit_kws=fit_kws,
color=color,
vertical=vertical,
norm_hist=norm_hist,
axlabel=axlabel,
label=label,
ax=ax)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_violin_box_or_bar(x=None,
y=None,
hue=None,
data=None,
order=None,
hue_order=None,
bw='scott',
cut=2,
scale='count',
scale_hue=True,
gridsize=100,
width=0.8,
inner='quartile',
split=False,
orient=None,
linewidth=None,
color=None,
palette=None,
saturation=0.75,
ax=None,
fliersize=5,
whis=1.5,
notch=False,
ci=95,
n_boot=1000,
units=None,
errcolor='0.26',
errwidth=None,
capsize=None,
violin_or_box='violin',
colors=(),
figure_size=FIGURE_SIZE,
title=None,
xlabel=None,
ylabel=None,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
Plot violin plot.
:param x:
:param y:
:param hue:
:param data:
:param order:
:param hue_order:
:param bw:
:param cut:
:param scale:
:param scale_hue:
:param gridsize:
:param width:
:param inner:
:param split:
:param orient:
:param linewidth:
:param color:
:param palette:
:param saturation:
:param ax:
:param fliersize:
:param whis:
:param notch:
:param ci:
:param n_boot:
:param units:
:param errcolor:
:param errwidth:
:param capsize:
:param violin_or_box:
:param colors: iterable;
:param figure_size: tuple;
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
# Initialize a figure
if not ax:
figure(figsize=figure_size)
if isinstance(x, str):
x = data[x]
if isinstance(y, str):
y = data[y]
if not palette:
palette = assign_colors_to_states(x, colors=colors)
if len(set([v for v in y
if v and ~isnull(v)])) <= 2: # Use barplot for binary
barplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
ci=ci,
n_boot=n_boot,
units=units,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
errcolor=errcolor,
ax=ax,
errwidth=errwidth,
capsize=capsize,
**kwargs)
else: # Use violin or box plot for continuous or categorical
if violin_or_box == 'violin':
violinplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
bw=bw,
cut=cut,
scale=scale,
scale_hue=scale_hue,
gridsize=gridsize,
width=width,
inner=inner,
split=split,
orient=orient,
linewidth=linewidth,
color=color,
palette=palette,
saturation=saturation,
ax=ax,
**kwargs)
elif violin_or_box == 'box':
boxplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
width=width,
fliersize=fliersize,
linewidth=linewidth,
whis=whis,
notch=notch,
ax=ax,
**kwargs)
else:
raise ValueError(
'\'violin_or_box\' must be either \'violin\' or \'box\'.')
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_heatmap(dataframe,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
annot=None,
fmt='.2g',
annot_kws=None,
linewidths=0,
linecolor='white',
cbar=False,
cbar_kws=None,
cbar_ax=None,
square=False,
xticklabels=False,
yticklabels=False,
mask=None,
figure_size=FIGURE_SIZE,
data_type='continuous',
normalization_method=None,
normalization_axis=0,
max_std=3,
axis_to_sort=None,
cluster=False,
row_annotation=(),
column_annotation=(),
annotation_colors=(),
title=None,
xlabel=None,
ylabel=None,
xlabel_rotation=0,
ylabel_rotation=90,
xtick_rotation=90,
ytick_rotation=0,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
Plot heatmap.
:param dataframe:
:param vmin:
:param vmax:
:param cmap:
:param center:
:param robust:
:param annot:
:param fmt:
:param annot_kws:
:param linewidths:
:param linecolor:
:param cbar:
:param cbar_kws:
:param cbar_ax:
:param square:
:param xticklabels:
:param yticklabels:
:param mask:
:param figure_size:
:param data_type:
:param normalization_method:
:param normalization_axis:
:param max_std:
:param axis_to_sort:
:param cluster:
:param row_annotation:
:param column_annotation:
:param annotation_colors: list; a list of matplotlib color specifications
:param title:
:param xlabel:
:param ylabel:
:param xlabel_rotation:
:param ylabel_rotation:
:param xtick_rotation:
:param ytick_rotation:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
df = dataframe.copy()
if normalization_method:
df = normalize_2d_or_1d(
df, normalization_method,
axis=normalization_axis).clip(-max_std, max_std)
if len(row_annotation) or len(column_annotation):
if len(row_annotation):
if isinstance(row_annotation, Series):
row_annotation = row_annotation.copy()
if not len(row_annotation.index & df.index): # Series
# but without proper index
row_annotation.index = df.index
else:
row_annotation = Series(row_annotation, index=df.index)
row_annotation.sort_values(inplace=True)
            df = df.loc[row_annotation.index, :]
if len(column_annotation):
if isinstance(column_annotation, Series):
column_annotation = column_annotation.copy()
# Series but without proper index
if not len(column_annotation.index & df.columns):
column_annotation.index = df.columns
else:
column_annotation = Series(column_annotation, index=df.columns)
column_annotation.sort_values(inplace=True)
            df = df.loc[:, column_annotation.index]
if axis_to_sort in (0, 1):
a = array(df)
a.sort(axis=axis_to_sort)
df = DataFrame(a, index=df.index)
elif cluster:
row_indices, column_indices = get_dendrogram_leaf_indices(dataframe)
df = df.iloc[row_indices, column_indices]
if isinstance(row_annotation, Series):
row_annotation = row_annotation.iloc[row_indices]
if isinstance(column_annotation, Series):
column_annotation = column_annotation.iloc[column_indices]
figure(figsize=figure_size)
gridspec = GridSpec(10, 10)
ax_top = subplot(gridspec[0:1, 2:-2])
ax_center = subplot(gridspec[1:8, 2:-2])
ax_bottom = subplot(gridspec[8:10, 2:-2])
ax_left = subplot(gridspec[1:8, 1:2])
ax_right = subplot(gridspec[1:8, 8:9])
ax_top.axis('off')
ax_bottom.axis('off')
ax_left.axis('off')
ax_right.axis('off')
if not cmap:
if data_type == 'continuous':
cmap = CMAP_CONTINUOUS
elif data_type == 'categorical':
cmap = CMAP_CATEGORICAL
elif data_type == 'binary':
cmap = CMAP_BINARY
else:
raise ValueError(
'Target data type must be continuous, categorical, or binary.')
heatmap(
df,
vmin=vmin,
vmax=vmax,
cmap=cmap,
center=center,
robust=robust,
annot=annot,
fmt=fmt,
annot_kws=annot_kws,
linewidths=linewidths,
linecolor=linecolor,
cbar=cbar,
cbar_kws=cbar_kws,
cbar_ax=cbar_ax,
square=square,
ax=ax_center,
xticklabels=xticklabels,
yticklabels=yticklabels,
mask=mask,
**kwargs)
# Get values for making legend
values = unique(df.values)
values = values[~isnull(values)]
if data_type == 'continuous': # Plot colorbar
# Get not-nan values for computing min, mean, & max
min_ = values.min()
mean_ = values.mean()
max_ = values.max()
cax, kw = make_axes(
ax_bottom,
location='bottom',
fraction=0.16,
cmap=cmap,
norm=Normalize(min_, max_),
ticks=[min_, mean_, max_])
ColorbarBase(cax, **kw)
decorate(ax=cax, xtick_rotation=90)
elif data_type in ('categorical', 'binary'): # Plot category legends
if len(values) < 30:
horizontal_span = ax_center.axis()[1]
vertical_span = ax_center.axis()[3]
colors = assign_colors_to_states(values, colors=cmap)
columns = df.columns.tolist()
if isinstance(columns[0], str):
max_len_c = max([len(c) for c in columns])
else:
max_len_c = 10
vertical_offset = 0.016 * max_len_c
for i, v in enumerate(values):
x = (horizontal_span / len(values) / 2) + \
i * horizontal_span / len(values)
y = 0 - vertical_span * vertical_offset
c = colors[v]
ax_center.plot(
x, y, 'o', color=c, markersize=16, aa=True, clip_on=False)
ax_center.text(
x,
y - vertical_span * 0.05,
v,
horizontalalignment='center',
**FONT_STANDARD)
decorate(
title=title,
xlabel=xlabel,
ylabel=ylabel,
xlabel_rotation=xlabel_rotation,
ylabel_rotation=ylabel_rotation,
xtick_rotation=xtick_rotation,
ytick_rotation=ytick_rotation,
ax=ax_center)
if len(row_annotation):
if len(set(row_annotation)) <= 2:
cmap = CMAP_BINARY
else:
if len(annotation_colors):
cmap = ListedColormap(annotation_colors)
else:
cmap = CMAP_CATEGORICAL
heatmap(
| DataFrame(row_annotation) | pandas.DataFrame |
from typing import Tuple, List, Dict, Any
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, Imputer, FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV, cross_validate, KFold
from sklearn.metrics import mean_squared_error, make_scorer
import joblib
import mlflow
pd.options.display.max_columns = None
CURRENT_EXPERIMENT_NAME = 'feature engineering'
def filter_by(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
df_out = df
for key, value in kwargs.items():
if type(value) is list:
df_out = df_out[df_out[key].isin(value)]
else:
df_out = df_out[df_out[key] == value]
return df_out
def missing_rate(df: pd.DataFrame) -> pd.Series:
return df.isnull().sum() / len(df)
def reduce_mem_usage(df: pd.DataFrame, verbose: bool = True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / (1024 ** 2)
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem)
)
return df
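# Minimal usage sketch (assumes a DataFrame named `df_raw` has already been loaded):
#     df_raw = reduce_mem_usage(df_raw)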
def rmse(y_true, y_pred) -> float:
return np.sqrt(mean_squared_error(y_true, y_pred))
rmse_score = make_scorer(rmse, greater_is_better=False)
def add_key_prefix(d: Dict, prefix = 'best_') -> Dict:
return {prefix + key: value for key, value in d.items()}
def df_from_cv_results(d: Dict):
df = pd.DataFrame(d)
score_columns = ['mean_test_score', 'mean_train_score']
param_columns = [c for c in df.columns if c.startswith('param_')]
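    # Scores come from a greater_is_better=False scorer, so sklearn stores them negated;
    # flip the sign back before reporting.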
return pd.concat([
-df.loc[:, score_columns],
df.loc[:, param_columns],
], axis=1).sort_values(by='mean_test_score')
def sample(*args, frac: float = 0.01) -> np.ndarray:
n_rows = args[0].shape[0]
random_index = np.random.choice(n_rows, int(n_rows * frac), replace=False)
gen = (
a[random_index] for a in args
)
if len(args) == 1:
return next(gen)
else:
return gen
class BaseTransformer(BaseEstimator, TransformerMixin):
def fit(self, x: pd.DataFrame, y = None):
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
return x
class ColumnTransformer(BaseTransformer):
def __init__(self, defs: Dict[str, BaseTransformer]):
self.defs = defs
def fit(self, x: pd.DataFrame, y: np.ndarray = None):
for col, transformer in self.defs.items():
transformer.fit(x[col], y)
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
xp[col] = transformer.transform(x[col])
return xp
def fit_transform(self, x: pd.DataFrame, y: np.ndarray = None) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
if hasattr(transformer, 'fit_transform'):
xp[col] = transformer.fit_transform(x[col], y)
else:
xp[col] = transformer.fit(x[col], y).transform(x[col])
return xp
class WrappedLabelEncoder(BaseTransformer):
def __init__(self):
self.le = LabelEncoder()
def fit(self, x, y = None):
self.le.fit(x)
return self
def transform(self, x):
return self.le.transform(x)
class WeatherImputer(BaseTransformer):
def transform(self, w: pd.DataFrame) -> pd.DataFrame:
# add missing datetime
dt_min, dt_max = w['timestamp'].min(), w['timestamp'].max()
empty_df = pd.DataFrame({'timestamp': pd.date_range(start=dt_min, end=dt_max, freq='H')})
w_out = pd.concat([
ws.merge(
empty_df, on='timestamp', how='outer'
).sort_values(
by='timestamp'
).assign(
site_id=site_id
) for site_id, ws in w.groupby('site_id')
], ignore_index=True)
# large missing rate columns; fill by -999
w_out['cloud_coverage'] = w_out['cloud_coverage'].fillna(-999).astype(np.int16)
# small missing rate columns; fill by same value forward and backward
w_out = pd.concat([
ws.fillna(method='ffill').fillna(method='bfill') for _, ws in w_out.groupby('site_id')
], ignore_index=True)
# fill nan by mean over all sites
w_mean = w_out.groupby('timestamp').mean().drop(columns=['site_id']).reset_index()
w_mean = w_out.loc[:, ['site_id', 'timestamp']].merge(w_mean, on='timestamp', how='left')
w_out = w_out.where(~w_out.isnull(), w_mean)
# float -> uint
w_out['site_id'] = w_out['site_id'].astype(np.uint8)
return w_out
class WeatherEngineerer(BaseTransformer):
@staticmethod
def shift_by(wdf: pd.DataFrame, n: int) -> pd.DataFrame:
method = 'bfill' if n > 0 else 'ffill'
return pd.concat([
ws.iloc[:, [2, 4, 8]].shift(n).fillna(method=method) for _, ws in wdf.groupby('site_id')
], axis=0)
def weather_weighted_average(self, w: pd.DataFrame, hours: int = 5) -> pd.DataFrame:
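        # Triangular weighting: the shift at lag 1 gets weight `ahours`, lag 2 gets
        # `ahours - 1`, ..., lag `ahours` gets 1, normalised by 1 + 2 + ... + ahours.
        # Negative `hours` uses past values, positive `hours` uses future values.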
ahours = abs(hours)
sign = int(hours / ahours)
w_weighted_average = sum(
[self.shift_by(w, (i+1)*sign) * (ahours-i) for i in range(ahours)]
) / (np.arange(ahours) + 1).sum()
w_weighted_average.columns = ['{0}_wa{1}'.format(c, hours) for c in w_weighted_average.columns]
return pd.concat([w, w_weighted_average], axis=1)
@staticmethod
def dwdt(df: pd.DataFrame, base_col: str) -> pd.DataFrame:
df_out = df.copy()
df_out[base_col + '_dt_wa1'] = df[base_col] - df[base_col + '_wa1']
df_out[base_col + '_dt_wa-1'] = df[base_col] - df[base_col + '_wa-1']
df_out[base_col + '_dt_wa5'] = df[base_col] - df[base_col + '_wa5']
df_out[base_col + '_dt_wa-5'] = df[base_col] - df[base_col + '_wa-5']
return df_out
@staticmethod
def wet(df: pd.DataFrame, suffix: str) -> pd.DataFrame:
df_out = df.copy()
df_out['wet' + suffix] = df['air_temperature' + suffix] - df['dew_temperature' + suffix]
return df_out
def transform(self, w_in: pd.DataFrame) -> pd.DataFrame:
w = w_in.pipe(self.weather_weighted_average, hours=1) \
.pipe(self.weather_weighted_average, hours=-1) \
.pipe(self.weather_weighted_average) \
.pipe(self.weather_weighted_average, hours=-5)
w = w.pipe(self.dwdt, base_col='air_temperature') \
.pipe(self.dwdt, base_col='dew_temperature') \
.pipe(self.dwdt, base_col='wind_speed') \
.pipe(self.wet, suffix='') \
.pipe(self.wet, suffix='_wa1') \
.pipe(self.wet, suffix='_wa-1') \
.pipe(self.wet, suffix='_wa5') \
.pipe(self.wet, suffix='_wa-5')
return w
class WindDirectionEncoder(BaseTransformer):
@staticmethod
def _from_degree(degree: int) -> int:
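        # Map a wind direction in degrees onto one of 16 compass sectors of 22.5 degrees each.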
val = int((degree / 22.5) + 0.5)
arr = [i for i in range(0,16)]
return arr[(val % 16)]
def transform(self, x: pd.Series) -> pd.Series:
return x.apply(self._from_degree)
class WindSpeedEncoder(BaseTransformer):
def transform(self, x: pd.Series) -> pd.Series:
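        # Bin wind speed into 13 ordinal classes; the bin edges match the Beaufort
        # wind-force scale thresholds (assumes the input column is in m/s).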
return pd.cut(
x,
bins=[0, 0.3, 1.6, 3.4, 5.5, 8, 10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 33, 1000],
right=False, labels=False,
)
weather_pipeline = Pipeline(steps=[
('impute_missing_value', WeatherImputer()),
('feature_engineering', WeatherEngineerer()),
('label_encode', ColumnTransformer({
'wind_direction': WindDirectionEncoder(),
'wind_speed': WindSpeedEncoder(),
'wind_speed_wa1': WindSpeedEncoder(),
'wind_speed_wa-1': WindSpeedEncoder(),
'wind_speed_wa5': WindSpeedEncoder(),
'wind_speed_wa-5': WindSpeedEncoder(),
}))
])
class BuildingMetadataEngineerer(BaseTransformer):
def transform(self, bm_in: pd.DataFrame) -> pd.DataFrame:
bm = bm_in.copy()
bm['log_square_feet'] = np.log(bm['square_feet'])
bm['square_feet_per_floor'] = bm['square_feet'] / bm['floor_count']
bm['log_square_feet_per_floor'] = bm['log_square_feet'] / bm['floor_count']
bm['building_age'] = 2019 - bm['year_built']
bm['square_feet_per_age'] = bm['square_feet'] / bm['building_age']
bm['log_square_feet_per_age'] = bm['log_square_feet'] / bm['building_age']
return bm
class BuildingMetadataImputer(BaseTransformer):
def transform(self, bm: pd.DataFrame) -> pd.DataFrame:
return bm.fillna(-999)
building_metadata_pipeline = Pipeline(steps=[
('label_encode', ColumnTransformer({
'primary_use': WrappedLabelEncoder(),
})),
('feature_engineering', BuildingMetadataEngineerer()),
('impute_missing_value', BuildingMetadataImputer()),
])
class BuildingMetaJoiner(BaseTransformer):
def __init__(self, bm: pd.DataFrame = None):
self.bm = bm
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.bm is None:
return x
else:
return x.merge(
self.bm,
on='building_id',
how='left',
)
class WeatherJoiner(BaseTransformer):
def __init__(self, w: pd.DataFrame = None):
self.w = w
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.w is None:
return x
else:
return x.merge(
self.w,
on=['site_id', 'timestamp'],
how='left',
)
class DatetimeFeatureEngineerer(BaseTransformer):
def __init__(self, col: str = 'timestamp'):
self.col = col
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
ts = x[self.col]
xp['month'] = ts.dt.month.astype(np.int8)
xp['week'] = ts.dt.week.astype(np.int8)
xp['day_of_week'] = ts.dt.weekday.astype(np.int8)
xp['time_period'] = pd.cut(
ts.dt.hour,
bins=[0, 3, 6, 9, 12, 15, 18, 21, 25],
right=False, labels=False,
)
holidays = [
'2016-01-01', '2016-01-18', '2016-02-15', '2016-05-30', '2016-07-04',
'2016-09-05', '2016-10-10', '2016-11-11', '2016-11-24', '2016-12-26',
'2017-01-01', '2017-01-16', '2017-02-20', '2017-05-29', '2017-07-04',
'2017-09-04', '2017-10-09', '2017-11-10', '2017-11-23', '2017-12-25',
'2018-01-01', '2018-01-15', '2018-02-19', '2018-05-28', '2018-07-04',
'2018-09-03', '2018-10-08', '2018-11-12', '2018-11-22', '2018-12-25',
'2019-01-01'
]
xp['is_holiday'] = (ts.dt.date.astype('str').isin(holidays)).astype(np.int8)
return xp
class TargetEncoder(BaseTransformer):
def __init__(self, cv: int = 5, smoothing: int = 1):
self.agg = None
self.cv = cv
        self.smoothing = smoothing
def transform(self, x: pd.Series):
if self.agg is None:
            raise ValueError('you should fit() before transform()')
encoded = pd.merge(x, self.agg, left_on=x.name, right_index=True, how='left')
encoded = encoded.fillna(encoded.mean())
xp = encoded['y']
xp.name = x.name
return xp
def fit_transform(self, x: pd.Series, y: np.ndarray = None) -> pd.Series:
df = pd.DataFrame({'x': x, 'y': y})
self.agg = df.groupby('x').mean()
fold = KFold(n_splits=self.cv, shuffle=True)
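        # Out-of-fold target encoding: rows in each test fold are encoded with category means
        # computed on the remaining folds, which limits target leakage during training;
        # `self.agg`, fit on all rows above, is what transform() uses for unseen data.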
xp = x.copy()
for idx_train, idx_test in fold.split(x):
df_train = df.loc[idx_train, :]
df_test = df.loc[idx_test, :]
agg_train = df_train.groupby('x').mean()
encoded = pd.merge(df_test, agg_train, left_on='x', right_index=True, how='left', suffixes=('', '_mean'))['y_mean']
encoded = encoded.fillna(encoded.mean())
xp[encoded.index] = encoded
return xp
class ColumnDropper(BaseTransformer):
def __init__(self, cols: List[str]):
self.cols = cols
def transform(self, x: pd.DataFrame, y = None) -> pd.DataFrame:
return x.drop(columns=self.cols)
class ArrayTransformer(BaseTransformer):
def transform(self, x: pd.DataFrame, y = None) -> np.ndarray:
return x.values
def pipeline_factory() -> Pipeline:
return Pipeline(steps=[
# join
('join_building_meta', BuildingMetaJoiner(
building_metadata_pipeline.fit_transform(
building_metadata
)
)),
('join_weather', WeatherJoiner(
weather_pipeline.fit_transform(
pd.concat([weather_train, weather_test], axis=0, ignore_index=True)
)
)),
# feature engineering
('feature_engineering_from_datetime', DatetimeFeatureEngineerer()),
('target_encode', ColumnTransformer({
'primary_use': TargetEncoder(),
'meter': TargetEncoder(),
'cloud_coverage': TargetEncoder(),
'time_period': TargetEncoder(),
'wind_direction': TargetEncoder(),
'wind_speed': TargetEncoder(),
'wind_speed_wa1': TargetEncoder(),
'wind_speed_wa-1': TargetEncoder(),
'wind_speed_wa5': TargetEncoder(),
'wind_speed_wa-5': TargetEncoder(),
})),
# drop columns
('drop_columns', ColumnDropper([
'building_id', 'timestamp', 'site_id', 'precip_depth_1_hr',
])),
# pd.DataFrame -> np.ndarray
('df_to_array', ArrayTransformer()),
# regressor
('regressor', RandomForestRegressor()),
])
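# Rough usage sketch (assumes the `train` DataFrame assembled in __main__ below):
#     pipeline = pipeline_factory()
#     rmse_val, rmse_train = cv(pipeline, train, n_estimators=40)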
def cv(pipeline: Pipeline, df: pd.DataFrame, n_jobs: int = -1, **params) -> Tuple[float, float]:
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_params = dict(
n_estimators=10,
max_depth=None,
max_features='auto',
min_samples_leaf=1,
)
merged_params = {**default_params, **params}
pipeline_params = {**merged_params, 'n_jobs': n_jobs}
pipeline_params = add_key_prefix(pipeline_params, 'regressor__')
pipeline.set_params(**pipeline_params)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_params)
scores = cross_validate(
pipeline, x, y,
cv=3,
scoring=rmse_score,
return_train_score=True,
verbose=2,
)
rmse_val = - np.mean(scores['test_score'])
rmse_train = - np.mean(scores['train_score'])
mlflow.log_metrics(dict(
rmse_val=rmse_val,
rmse_train=rmse_train,
))
return rmse_val, rmse_train
def oneshot(pipeline: Pipeline, df: pd.DataFrame, **params):
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_params = dict(
n_estimators=10,
max_depth=None,
max_features='auto',
min_samples_leaf=1,
)
merged_params = {**default_params, **params}
pipeline_params = {**merged_params, 'n_jobs': -1, 'verbose': 2}
pipeline_params = add_key_prefix(pipeline_params, 'regressor__')
pipeline.set_params(**pipeline_params)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_params)
pipeline.fit(x, y)
joblib.dump(pipeline, 'out/pipeline.sav', compress=1)
score = rmse(y, pipeline.predict(x))
mlflow.log_metrics(dict(rmse_train=score))
mlflow.log_artifact('out/pipeline.sav')
return pipeline
def grid_search(pipeline: Pipeline, df: pd.DataFrame, n_jobs: int = -1, **param_grid):
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_param_grid = dict(
n_estimators=[80],
max_depth=[None],
max_features=['auto'],
min_samples_leaf=[0.00003],
)
merged_param_grid = {**default_param_grid, **param_grid}
pipeline_param_grid = add_key_prefix(merged_param_grid, 'regressor__')
pipeline.set_params(regressor__n_jobs=n_jobs)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_param_grid)
regressor = GridSearchCV(
pipeline,
param_grid=pipeline_param_grid,
cv=3,
scoring=rmse_score,
verbose=2,
refit=True,
)
regressor.fit(x, y)
best_model = regressor.best_estimator_
best_param = add_key_prefix(regressor.best_params_)
best_rmse = - regressor.best_score_
cv_results = df_from_cv_results(regressor.cv_results_)
joblib.dump(best_model, 'out/model.sav')
cv_results.to_csv('out/cv_results.csv', index=False)
mlflow.log_params(best_param)
mlflow.log_metrics(dict(
rmse=best_rmse,
))
mlflow.log_artifact('./out/model.sav')
mlflow.log_artifact('./out/cv_results.csv')
mlflow.end_run()
return cv_results
def load_model(run_id: str = None):
if run_id is None:
model_path = 'out/model.joblib'
else:
mlflow_client = mlflow.tracking.MlflowClient()
model_path = mlflow_client.download_artifacts(run_id, 'model.joblib')
return joblib.load(model_path)
def predict(df: pd.DataFrame, pipeline: Pipeline) -> pd.DataFrame:
x = df.iloc[:, 1:]
y_log1p = pipeline.predict(x)
y = np.expm1(y_log1p)
return pd.DataFrame({
'row_id': df.iloc[:, 0],
'meter_reading': y,
})[['row_id', 'meter_reading']]
if __name__ == '__main__':
train = | pd.read_csv('data/train.csv', parse_dates=['timestamp']) | pandas.read_csv |
import os
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder,MinMaxScaler
from config import *
import numpy as np
import json
class Dataset:
def __init__(self,data_path):
self.data_path = data_path
self.load_dataset()
def load_dataset(self):
if not os.path.exists(self.data_path):
raise Exception('File Not Found')
self.dataset = | pd.read_csv(self.data_path) | pandas.read_csv |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %%
DATA_ROOT = '../../data/raw'
# %% [markdown]
# ## LOADING DATA
# %%
print('Loading raw datasets...', flush=True)
GIT_COMMITS_PATH = f"{DATA_ROOT}/GIT_COMMITS.csv"
GIT_COMMITS_CHANGES = f"{DATA_ROOT}/GIT_COMMITS_CHANGES.csv"
SONAR_MEASURES_PATH = f"{DATA_ROOT}/SONAR_MEASURES.csv"
SZZ_FAULT_INDUCING_COMMITS = f"{DATA_ROOT}/SZZ_FAULT_INDUCING_COMMITS.csv"
JIRA_ISSUES = f"{DATA_ROOT}/JIRA_ISSUES.csv"
# %%
git_commits = pd.read_csv(GIT_COMMITS_PATH)
git_commits_changes = pd.read_csv(GIT_COMMITS_CHANGES)
sonar_measures = pd.read_csv(SONAR_MEASURES_PATH)
szz_fault_inducing_commits = pd.read_csv(SZZ_FAULT_INDUCING_COMMITS)
jira_issues = | pd.read_csv(JIRA_ISSUES) | pandas.read_csv |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datafile', type=str, default='heteroaryl_suzuki.onerx')
parser.add_argument('--output', default='heteroaryl_suzuki.csv')
args= parser.parse_args()
import pandas as pd
fname=args.datafile
sidx = 5
bidx = 6
tidx = 7
lidx = 9
yidx = 11
aidx = 12
sml_idx = 14
data = {'solvent':[], 'base':[], 'temperature':[], 'ligand':[], 'yield':[], 'article':[], 'reaction_smiles':[]}
i=0
with open(fname, 'r') as f:
for line in f:
x = eval(line)
data['solvent'].append(',,'.join(x[sidx]))
data['base'].append(',,'.join(x[bidx]))
data['temperature'].append(x[tidx][0] if x[tidx]!=[] else None)
data['ligand'].append(',,'.join(x[lidx]))
data['yield'].append(x[yidx][0] if x[yidx]!=[] else None)
data['reaction_smiles'].append(x[sml_idx])
data['article'].append(x[aidx][1] if len(x[aidx])>1 else '')
i+=1
data = | pd.DataFrame(data) | pandas.DataFrame |
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import math
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0)
matrix_aa = pd.read_csv('data/axon-axon.csv', header=0, index_col=0)
matrix_da = pd.read_csv('data/dendrite-axon.csv', header=0, index_col=0)
matrix = matrix_ad + matrix_dd + matrix_aa + matrix_da
# the columns are string by default and the indices int; now both are int
matrix_ad.columns = pd.to_numeric(matrix_ad.columns)
matrix_dd.columns = pd.to_numeric(matrix_dd.columns)
matrix_aa.columns = pd.to_numeric(matrix_aa.columns)
matrix_da.columns = pd.to_numeric(matrix_da.columns)
matrix.columns = | pd.to_numeric(matrix.columns) | pandas.to_numeric |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
        Validate that the ``__dict__`` attribute is correctly updated when the state is set.
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returs the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
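        # Each category's interval width equals its relative frequency; each stored tuple is
        # (start, end, mean, std), with std equal to one sixth of the interval width.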
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
        - a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
        Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
        data = pd.Series([1, 2, 3, 4])
import pandas as pd
import numpy as np
from rdtools import energy_from_power
import pytest
# Tests for resampling at same frequency
def test_energy_from_power_calculation():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_max_interval():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=np.nan, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('5 minutes'))
    # We expect a series of NaNs, because max_timedelta is smaller than the
    # time step of the power time series
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_validation():
power_series = pd.Series(data=[4.0] * 4)
with pytest.raises(ValueError):
energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
def test_energy_from_power_single_argument():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 15:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
missing = pd.to_datetime('2018-04-01 13:00:00')
power_series = power_series.drop(missing)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_nan = [missing]
expected_nan.append(pd.to_datetime('2018-04-01 13:15:00'))
expected_energy_series.loc[expected_nan] = np.nan
expected_energy_series.name = 'energy_Wh'
# Test that the result has the expected missing timestamp based on median timestep
result = energy_from_power(power_series)
pd.testing.assert_series_equal(result, expected_energy_series)
# Tests for downsampling
def test_energy_from_power_downsample():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series, '60T')
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=1.5, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_not_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('60 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_for_issue_107():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 16:00', freq='15T')
dc_power = pd.Series(index=times, data=1.0)
dc_power = dc_power.drop(dc_power.index[5:12])
expected_times = pd.date_range('2018-04-01 13:00', '2018-04-01 16:00', freq='60T')
expected_energy_series = pd.Series(index=expected_times,
data=[1.0, np.nan, np.nan, 1.0],
name='energy_Wh')
result = energy_from_power(dc_power, '60T')
pd.testing.assert_series_equal(result, expected_energy_series)
# Tests for upsampling
def test_energy_from_power_upsample():
    times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:30', freq='30T')
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
            pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2'])
        )
import numpy as np
from keras.models import Model
from keras.models import load_model, model_from_json
from os.path import join
import config.settings as cnst
import plots.plots as plots
from predict.predict import predict_byte, predict_byte_by_section
from predict.predict_args import DefaultPredictArguments, Predict as pObj
from .ati_args import SectionActivationDistribution
import pandas as pd
from analyzers.collect_exe_files import get_partition_data, store_partition_data
import gc
import logging
import pefile
def find_qualified_sections(sd, trend, common_trend, support, fold_index):
""" Function for training Tier-1 model with whole byte sequence data
Args:
sd: object to hold activation distribution of PE sections
trend: plain activation trend found by core ATI process
common_trend: not used here
support: not used here
fold_index: current fold index of cross validation
Returns:
q_sections_by_q_criteria: a dict with q_criterion found for each percentile supplied and
their respective list of sections qualified.
"""
btrend = trend.loc["BENIGN_ACTIVATION_MAGNITUDE"]
mtrend = trend.loc["MALWARE_ACTIVATION_MAGNITUDE"]
# Averaging based on respective benign and malware population
btrend = btrend / sd.b1_b_truth_count
mtrend = mtrend / sd.b1_m_truth_count
btrend[btrend == 0] = 1
mtrend[mtrend == 0] = 1
malfluence = mtrend / btrend
benfluence = btrend / mtrend
mal_q_criteria_by_percentiles = np.percentile(malfluence, q=cnst.PERCENTILES)
ben_q_criteria_by_percentiles = np.percentile(benfluence, q=cnst.PERCENTILES)
q_sections_by_q_criteria = {}
for i, _ in enumerate(cnst.PERCENTILES):
# Uncomment [:50] for unqualified sections. Set percentile to 48
q_sections_by_q_criteria[mal_q_criteria_by_percentiles[i]] = np.unique(np.concatenate([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]])) # [:50]
if i == 0: # Do once for lowest percentile
list_qsec = np.concatenate([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]]) # [:50]
list_avg_act_mag_signed = np.concatenate([malfluence[malfluence > mal_q_criteria_by_percentiles[i]] * -1, benfluence[benfluence > ben_q_criteria_by_percentiles[i]]]) # [:50]
available_sec = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'available_sections.csv', header=None)
available_sec = list(available_sec.iloc[0])
sec_emb = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'section_embeddings.csv')
list_qsec_id = []
list_qsec_emb = []
for q in list_qsec:
try:
list_qsec_emb.append(sec_emb[q][0])
list_qsec_id.append(available_sec.index(q) + 1)
except Exception as e:
if not (cnst.LEAK in str(e) or cnst.PADDING in str(e)):
logging.debug("The section ["+str(q)+"] is not present in available_sections.csv/section_embeddings.csv")
influence = np.concatenate([malfluence[malfluence > mal_q_criteria_by_percentiles[i]], benfluence[benfluence > ben_q_criteria_by_percentiles[i]]])
qdf = pd.DataFrame([list_qsec, list_qsec_id, list_qsec_emb, list_avg_act_mag_signed, influence], columns=list_qsec, index=['a', 'b', 'c', 'd', 'e'])
qdf = qdf.transpose().sort_values(by='e', ascending=False).transpose()
qdf.to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'qsections_meta_'+str(fold_index)+'.csv', header=None, index=False)
# print("Mal Sections:", trend.columns[malfluence > mal_q_criteria_by_percentiles[i]])
# print("Ben Sections:", trend.columns[benfluence > ben_q_criteria_by_percentiles[i]])
logging.info("Qsections found - " + str(len(q_sections_by_q_criteria.keys())))
logging.info(q_sections_by_q_criteria.keys())
return q_sections_by_q_criteria
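# Illustrative shape of the returned mapping (keys and section names below are made up):
#   {1.87: array(['.text', '.rdata', '.data'], dtype=object),
#    2.41: array(['.text', '.rdata'], dtype=object), ...}
# i.e. one entry per percentile-derived q_criterion, holding the section names qualified at that level.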
def parse_pe_pkl(file_index, file_id, fjson, unprocessed):
""" Function to parse pickle file to find the boundaries of PE sections in a sample's pickle representation
Args:
file_index: PE sample index
file_id: PE name
fjson: pickle data representation of PE sample
unprocessed: keeps track of count of samples not processed properly
Returns:
section_bounds: PE section boundaries
unprocessed: keeps track of count of samples not processed properly
file_byte_size: size of full sample
"""
section_bounds = []
file_byte_size = None
max_section_end_offset = 0
try:
# file_byte_size = fjson['size_byte']
with open(cnst.RAW_SAMPLE_DIR + file_id, 'rb') as f:
file_byte_size = len(list(f.read()))
pe = pefile.PE(cnst.RAW_SAMPLE_DIR + file_id)
for pkl_section in pe.sections:
section_bounds.append(
(pkl_section.Name.strip(b'\x00').decode("utf-8").strip(),
pkl_section.PointerToRawData,
pkl_section.PointerToRawData + pkl_section.SizeOfRawData))
if (pkl_section.PointerToRawData + pkl_section.SizeOfRawData) > max_section_end_offset:
max_section_end_offset = (pkl_section.PointerToRawData + pkl_section.SizeOfRawData)
# Placeholder section "padding" - for activations in padding region
# if max_section_end_offset < fjson["size_byte"]:
# section_bounds.append((cnst.TAIL, max_section_end_offset + 1, fjson["size_byte"]))
# section_bounds.append((cnst.PADDING, fjson["size_byte"] + 1, cnst.MAX_FILE_SIZE_LIMIT))
except Exception as e:
logging.Exception("parse failed . . . [FILE INDEX - " + str(file_index) + "] [" + str(file_id) + "] ")
unprocessed += 1
return section_bounds, unprocessed, file_byte_size
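# Illustrative shape of the returned `section_bounds` (offsets below are made up):
#   [('.text', 1024, 20480), ('.rdata', 20480, 28672), ('.data', 28672, 32768)]
# i.e. one (section_name, raw_data_start_offset, raw_data_end_offset) tuple per PE section.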
def map_act_to_sec(ftype, fmap, sbounds, sd):
"""
Function to map each hidden layer activation found to corresponding PE section
Params:
ftype: Benign or Malware
fmap: Hidden layer activation map
sbounds: Dict of PE sections and their boundaries
Return:
sd: Object to hold computed activation distribution of PE sections
Description of other variables/objects used:
section_support: Information about how many samples in a given category has a section <Influence by presence>
activation_histogram: Information about total count of activations occurred in a given section for all samples
of given category <Influence by activation count>
activation_magnitude: Information about total sum of magnitude of activations occurred in a given section
for all samples of given category <Influence by activation strength>
"""
# fmap = fmap // 1 # print("FEATURE MAP ", len(feature_map), " : \n", feature_map)
idx = np.argsort(fmap)[::-1][:len(fmap)] # Sort activations in descending order -- Helpful to find top activations
if sbounds is not None:
for j in range(0, len(sbounds)):
section = sbounds[j][0]
sd.a_section_support[section] = (
sd.a_section_support[section] + 1) if section in sd.a_section_support.keys() else 1
if ftype == cnst.BENIGN:
sd.b_section_support[section] = (
sd.b_section_support[section] + 1) if section in sd.b_section_support.keys() else 1
if section not in sd.m_section_support.keys():
sd.m_section_support[section] = 0
else:
if section not in sd.b_section_support.keys():
sd.b_section_support[section] = 0
sd.m_section_support[section] = (
sd.m_section_support[section] + 1) if section in sd.m_section_support.keys() else 1
for current_activation_window in range(0, len(fmap)): # range(0, int(cnst.MAX_FILE_SIZE_LIMIT / cnst.CONV_STRIDE_SIZE)):
section = None
offset = idx[current_activation_window] * cnst.CONV_WINDOW_SIZE
act_val = fmap[idx[current_activation_window]]
######################################################################################
# Change for Pooling layer based Activation trend - Only Max activation is traced back
if act_val == 0:
continue
######################################################################################
for j in range(0, len(sbounds)):
cur_section = sbounds[j]
if cur_section[1] <= offset <= cur_section[2]:
section = cur_section[0]
break
if section is not None:
# if "." not in section: section = "." + section #Same section's name with and without dot are different
# Sum of Magnitude of Activations
if section in sd.a_activation_magnitude.keys():
sd.a_activation_magnitude[section] += act_val
sd.a_activation_histogram[section] += 1
if ftype == cnst.BENIGN:
if sd.b_activation_magnitude[section] is None:
sd.b_activation_magnitude[section] = act_val
sd.b_activation_histogram[section] = 1
else:
sd.b_activation_magnitude[section] += act_val
sd.b_activation_histogram[section] += 1
else:
if sd.m_activation_magnitude[section] is None:
sd.m_activation_magnitude[section] = act_val
sd.m_activation_histogram[section] = 1
else:
sd.m_activation_magnitude[section] += act_val
sd.m_activation_histogram[section] += 1
else:
sd.a_activation_magnitude[section] = act_val
sd.a_activation_histogram[section] = 1
if ftype == cnst.BENIGN:
sd.b_activation_magnitude[section] = act_val
sd.b_activation_histogram[section] = 1
sd.m_activation_magnitude[section] = None
sd.m_activation_histogram[section] = None
else:
sd.b_activation_magnitude[section] = None
sd.b_activation_histogram[section] = None
sd.m_activation_magnitude[section] = act_val
sd.m_activation_histogram[section] = 1
else:
# !!! VERIFY ALL OFFSET IS MATCHED AND CHECK FOR LEAKAGE !!!
# print("No matching section found for OFFSET:", offset)
sd.a_activation_magnitude[cnst.LEAK] += act_val
sd.a_activation_histogram[cnst.LEAK] += 1
if ftype == cnst.BENIGN:
sd.b_activation_magnitude[cnst.LEAK] += act_val
sd.b_activation_histogram[cnst.LEAK] += 1
else:
sd.m_activation_magnitude[cnst.LEAK] += act_val
sd.m_activation_histogram[cnst.LEAK] += 1
return sd
def get_feature_maps(smodel, partition, files):
"""
Function to obtain hidden layer activation (feature) maps using given stunted model
Params:
smodel: stunted model to use
partition: partition for current set of B1 samples under process
files: IDs of the samples to be processed from the partition
Returns:
raw_feature_maps: hidden layer activation (feature) maps
"""
predict_args = DefaultPredictArguments()
predict_args.verbose = cnst.ATI_PREDICT_VERBOSE
xlen = len(files)
predict_args.pred_steps = xlen // predict_args.batch_size if xlen % predict_args.batch_size == 0 else xlen // predict_args.batch_size + 1
raw_feature_maps = predict_byte(smodel, files, predict_args)
return raw_feature_maps
def process_files(stunted_model, args, sd):
"""
Function to process the B1 samples to obtain hidden layer activation maps and trace back their PE sections
Params:
stunted_model: Tier-1 model that is stunted up to required hidden layer where activation maps are collected.
args: contains various config data
Returns:
sd: Object to hold computed activation distribution of PE sections
"""
unprocessed = 0
samplewise_feature_maps = []
files = args.t2_x_train
files_type = args.t2_y_train
logging.info("FMAP MODULE Total B1 [{0}]\tGroundTruth [{1}:{2}]".format(len(args.t2_y_train), len(np.where(args.t2_y_train == cnst.BENIGN)[0]), len(np.where(args.t2_y_train == cnst.MALWARE)[0])))
# file_type = pObj_fmap.ytrue[i] # Using Ground Truth to get trend of actual benign and malware files
# file_whole_bytes = {file[:-4]: args.whole_b1_train_partition[file[:-4]]}
raw_feature_maps = get_feature_maps(stunted_model, args.whole_b1_train_partition, files)
del args.whole_b1_train_partition
gc.collect()
for i in range(0, len(files)):
section_bounds, unprocessed, fsize = parse_pe_pkl(i, files[i], args.section_b1_train_partition[files[i]], unprocessed)
if cnst.USE_POOLING_LAYER:
try:
pooled_max_1D_map = np.sum(raw_feature_maps[i] == np.amax(raw_feature_maps[i], axis=0), axis=1)[:np.min([cnst.MAX_FILE_CONVOLUTED_SIZE,int(fsize/cnst.CONV_STRIDE_SIZE)+2])]
sd = map_act_to_sec(files_type[i], pooled_max_1D_map, section_bounds, sd)
except Exception as e:
logging.exception("$$$$$$$$ " + str(np.shape(raw_feature_maps[i]))) # .size, files[i], args.whole_b1_train_partition[files[i][:-4]])
else:
feature_map = raw_feature_maps[i].sum(axis=1).ravel()
# feature_map_histogram(feature_map, prediction)
samplewise_feature_maps.append(feature_map)
sd = map_act_to_sec(files_type[i], feature_map, section_bounds, sd)
del args.section_b1_train_partition
gc.collect()
return sd
# print(section_stat)
# print("Unprocessed file count: ", unprocessed)
# Find activation distribution
# raw_arr = np.array(np.squeeze(temp_feature_map_list))
# print(len(raw_arr), raw_arr.max())
# raw_arr = raw_arr[raw_arr > 0.3]
# print(len(raw_arr))
# plt.hist(raw_arr, 10)#range(0, len(raw_arr)))
# plt.show()
'''for key in act.keys():
# key = "."+key if "." not in key else key
if key is not None and key != '' and key != '.padding':
with open("BENIGN" if "benign" in section_stat_file else "MALWARE" + "_activation_" + key[1:] + ".csv", mode='a+') as f:
f.write(str(act[key]))
'''
'''
#overall_stat.append(section_stat)
for x in pcs_keys:
overall_stat_str += str(section_stat[x]) + ","
overall_stat_str = overall_stat_str[:-1] + "\n"
print("\n[Unprocessed Files : ", unprocessed, "] Overall Stats: ", overall_stat_str)
processed_file_count = len(fn_list) - unprocessed
normalized_stats_str = str(section_stat["header"]/processed_file_count) + "," \
+ str(section_stat["text"]/processed_file_count) + "," \
+ str(section_stat["data"]/processed_file_count) + "," \
+ str(section_stat["rsrc"]/processed_file_count) + "," \
+ str(section_stat["pdata"]/processed_file_count) + "," \
+ str(section_stat["rdata"]/processed_file_count) + "\n"
#+ str(section_stat["padding"]/processed_file_count) \
print("Normalized Stats: ", normalized_stats_str)
#plt.show()
with open(section_stat_file, 'w+') as f:
f.write(overall_stat_str)
f.write("\n")
f.write(normalized_stats_str)
'''
def change_model(model, new_input_shape=(None, cnst.SAMPLE_SIZE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = model_from_json(model.to_json())
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
# logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
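# Hypothetical usage sketch of change_model(): transfer the weights of a fully trained Tier-1
# model to a copy that accepts the reduced block-sized input (names mirror get_stunted_model below).
#   full_model = load_model(join(args.save_path, args.t1_model_name))
#   block_model = change_model(full_model, new_input_shape=(None, cnst.SAMPLE_SIZE))
#   block_model.summary()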
def get_stunted_model(args, tier):
""" Function to stunt the given model up to the required hidden layer
based on the supplied hidden layer number
"""
complete_model = load_model(join(args.save_path, args.t1_model_name if tier == 1 else args.t2_model_name))
complete_model = change_model(complete_model, new_input_shape=(None, cnst.SAMPLE_SIZE))
# model.summary()
# redefine model to output right after the sixth hidden layer
# (ReLU activation layer after convolution - before max pooling)
stunted_outputs = [complete_model.layers[x].output for x in [args.layer_num]]
# stunted_outputs = complete_model.get_layer('multiply_1').output
stunted_model = Model(inputs=complete_model.inputs, outputs=stunted_outputs)
# stunted_model.summary()
logging.debug("Model stunted upto " + str(stunted_outputs[0]) + " Layer number passed to stunt:" + str(args.layer_num))
return stunted_model
def save_activation_trend(sd):
"""
Function to save the various activation trends identified in CSV format files.
Params:
sd: Object containing computed activation distribution of PE sections
Returns:
fmaps_trend: used to identify the qualified sections in subsequent steps
others: Not in use currently
"""
fmaps_trend = pd.DataFrame()
fmaps_common_trend = pd.DataFrame()
fmaps_section_support = pd.DataFrame()
fmaps_trend["ACTIVATION / HISTOGRAM"] = ["ALL_ACTIVATION_MAGNITUDE", "BENIGN_ACTIVATION_MAGNITUDE",
"MALWARE_ACTIVATION_MAGNITUDE", "HISTOGRAM_ALL", "HISTOGRAM_BENIGN",
"HISTOGRAM_MALWARE"]
fmaps_common_trend["COMMON"] = ["ALL_ACTIVATION_MAGNITUDE", "BENIGN_ACTIVATION_MAGNITUDE",
"MALWARE_ACTIVATION_MAGNITUDE", "HISTOGRAM_ALL", "HISTOGRAM_BENIGN",
"HISTOGRAM_MALWARE"]
fmaps_section_support["SUPPORT"] = ["PRESENCE_IN_ALL", "PRESENCE_IN_BENIGN", "PRESENCE_IN_MALWARE",
"SUPPORT_IN_ALL", "SUPPORT_IN_BENIGN", "SUPPORT_IN_MALWARE"]
for key in sd.a_activation_histogram.keys():
fmaps_trend[key] = [int(sd.a_activation_magnitude[key]) if sd.a_activation_magnitude[key] is not None else
sd.a_activation_magnitude[key],
int(sd.b_activation_magnitude[key]) if sd.b_activation_magnitude[key] is not None else
sd.b_activation_magnitude[key],
int(sd.m_activation_magnitude[key]) if sd.m_activation_magnitude[key] is not None else
sd.m_activation_magnitude[key],
int(sd.a_activation_histogram[key]) if sd.a_activation_histogram[key] is not None else
sd.a_activation_histogram[key],
int(sd.b_activation_histogram[key]) if sd.b_activation_histogram[key] is not None else
sd.b_activation_histogram[key],
int(sd.m_activation_histogram[key]) if sd.m_activation_histogram[key] is not None else
sd.m_activation_histogram[key]]
if sd.b_activation_histogram[key] is not None and sd.m_activation_histogram[key] is not None:
fmaps_common_trend[key] = [
int(sd.a_activation_magnitude[key]) if sd.a_activation_magnitude[key] is not None else
sd.a_activation_magnitude[key],
int(sd.b_activation_magnitude[key]) if sd.b_activation_magnitude[key] is not None else
sd.b_activation_magnitude[key],
int(sd.m_activation_magnitude[key]) if sd.m_activation_magnitude[key] is not None else
sd.m_activation_magnitude[key],
int(sd.a_activation_histogram[key]) if sd.a_activation_histogram[key] is not None else
sd.a_activation_histogram[key],
int(sd.b_activation_histogram[key]) if sd.b_activation_histogram[key] is not None else
sd.b_activation_histogram[key],
int(sd.m_activation_histogram[key]) if sd.m_activation_histogram[key] is not None else
sd.m_activation_histogram[key]]
if sd.b1_count > 0 and sd.b1_b_truth_count > 0 and sd.b1_m_truth_count > 0:
for key in sd.a_section_support.keys():
fmaps_section_support[key] = [sd.a_section_support[key], sd.b_section_support[key],
sd.m_section_support[key],
"{:0.1f}%".format(sd.a_section_support[key] / sd.b1_count * 100),
"{:0.1f}%".format(sd.b_section_support[key] / sd.b1_b_truth_count * 100),
"{:0.1f}%".format(sd.m_section_support[key] / sd.b1_m_truth_count * 100)]
fmaps_trend.fillna(-1, inplace=True)
fmaps_trend.set_index('ACTIVATION / HISTOGRAM', inplace=True)
fmaps_common_trend.set_index('COMMON', inplace=True)
fmaps_section_support.set_index('SUPPORT', inplace=True)
# Store activation trend identified
fmaps_trend.to_csv(cnst.COMBINED_FEATURE_MAP_STATS_FILE, index=True)
fmaps_common_trend.to_csv(cnst.COMMON_COMBINED_FEATURE_MAP_STATS_FILE, index=True)
fmaps_section_support.to_csv(cnst.SECTION_SUPPORT, index=True)
# Drop padding and leak information after saving - not useful for further processing
try:
fmaps_trend.drop([cnst.PADDING], axis=1, inplace=True)
fmaps_common_trend.drop([cnst.PADDING], axis=1, inplace=True)
fmaps_section_support.drop([cnst.PADDING], axis=1, inplace=True)
fmaps_trend.drop([cnst.LEAK], axis=1, inplace=True)
fmaps_common_trend.drop([cnst.LEAK], axis=1, inplace=True)
except:
logging.info("Proceeding after trying to clean fmap data.")
return fmaps_trend, fmaps_common_trend, fmaps_section_support
def start_ati_process(args, fold_index, partition_count, sd):
"""
Function to perform the ATI process over all partitions of B1 training set
Params:
args: contains various config data
fold_index: current fold index of cross validation
partition_count: count of train B1 partitions
Returns:
sd: Object containing computed activation distribution of PE sections
"""
args.layer_num = cnst.LAYER_NUM_TO_STUNT
stunted_model = get_stunted_model(args, tier=1)
for pcount in range(0, partition_count):
logging.info("ATI for partition: %s", pcount)
b1datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "b1_train_" + str(fold_index) + "_p" + str(pcount) + ".csv", header=None)
args.t2_x_train, args.t2_y_train = b1datadf.iloc[:, 0], b1datadf.iloc[:, 1]
args.whole_b1_train_partition = get_partition_data("b1_train", fold_index, pcount, "t1")
args.section_b1_train_partition = get_partition_data("b1_train", fold_index, pcount, "t2")
sd = process_files(stunted_model, args, sd)
del stunted_model
gc.collect()
return sd
def get_top_act_blocks(top_acts_idx, sbounds, q_sections, whole_bytes):
"""
Function to map the top activation back to Qualified section's byte blocks and collating them to form block dataset
Params:
        top_acts_idx: indices of the top activations, used as offsets into the hidden layer activation (feature) map
        sbounds: PE section boundaries
q_sections: qualified sections
whole_bytes: Entire byte content of a PE sample
Returns:
top_blocks: single sequence of all top blocks found
"""
top_blocks = []
top_acts_idx.sort()
if sbounds is not None:
for idx, offset in enumerate(top_acts_idx * cnst.CONV_STRIDE_SIZE):
for sname, low, upp in sbounds:
if low <= offset <= upp:
if sname in q_sections:
try:
top_blocks.extend(whole_bytes[offset:offset+cnst.CONV_STRIDE_SIZE])
break
except Exception as e:
logging.exception("[MODULE: get_section_id_vector()] Error occurred while mapping section id: %s %s %s %s %s %s",
idx, low, offset, upp, sname, sname in q_sections)
# else:
# print(sname, sname in q_sections, sname in available_sections)
else:
logging.info("Sections bounds not available. Returning a vector of Zeroes for section id vector.")
return top_blocks
def collect_b1_block_dataset(args, fold_index, partition_count, mode, qcnt='X'):
"""
Function to generate the top ativation blocks based dataset from B1 sample set
Params:
args: an object containing various config data
fold_index: current fold index of cross validation
partition_count: count of B1 train partitions
mode: phase of data collection - Train / Val / Test
qcnt: index of the current q_criterion. 'X' for Testing phase
Returns:
None (collected data is persisted directly to disk storage)
"""
args.layer_num = cnst.LAYER_NUM_TO_COLLECT_NN_DATASET
stunted_model = get_stunted_model(args, tier=cnst.TIER_TO_COLLECT_BLOCK_DATA)
for pcount in range(0, partition_count):
logging.info("Collecting Block data for partition: %s", pcount)
b1datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "b1_"+mode+"_" + str(fold_index) + "_p" + str(pcount) + ".csv", header=None)
files, files_type = b1datadf.iloc[:, 0], b1datadf.iloc[:, 1]
args.whole_b1_partition = get_partition_data("b1_"+mode, fold_index, pcount, "t1")
args.section_b1_partition = get_partition_data("b1_"+mode, fold_index, pcount, "t2")
unprocessed = 0
logging.info("Block Module Total B1 [{0}]\tGroundTruth [{1}:{2}]".format(len(files_type), len(np.where(files_type == cnst.BENIGN)[0]), len(np.where(files_type == cnst.MALWARE)[0])))
nn_predict_args = DefaultPredictArguments()
nn_predict_args.verbose = cnst.ATI_PREDICT_VERBOSE
raw_feature_maps = predict_byte_by_section(stunted_model, args.section_b1_partition, files, args.q_sections, None, nn_predict_args)
logging.info("Raw feature maps found.")
for i in range(0, len(files)):
section_bounds, unprocessed, fsize = parse_pe_pkl(i, files[i][:-4], args.section_b1_partition[files[i][:-4]], unprocessed)
if cnst.USE_POOLING_LAYER:
try:
cur_fmap = raw_feature_maps[i]
top_acts_idx = np.argmax(cur_fmap, axis=0)
top_blocks = get_top_act_blocks(top_acts_idx, section_bounds, args.q_sections, args.whole_b1_partition[files[i][:-4]]["whole_bytes"])
if sum(top_blocks) == 0:
logging.debug("No useful top block data added for sample " + files[i])
except Exception as e:
logging.exception("$$$$ Error occurred in Top Activation Block Module. $$$$")
args.whole_b1_partition[files[i][:-4]]["whole_bytes"] = top_blocks
store_partition_data("block_b1_"+mode, fold_index, pcount, "t1", args.whole_b1_partition)
del args.section_b1_partition
del args.whole_b1_partition
gc.collect()
del stunted_model
gc.collect()
def init(args, fold_index, partition_count, b1_all_file_cnt, b1b_all_truth_cnt, b1m_all_truth_cnt):
""" Activation Trend Identification (ATI) Module
Args:
args: various data required for ATI
fold_index: current fold of cross-validation
partition_count: number of partitions created for b1 training set
b1_all_file_cnt: count of samples in b1 set
b1b_all_truth_cnt: count of benign samples in b1 training set
b1m_all_truth_cnt: count of malware samples in b1 training set
Returns:
None (Resultant data are stored in CSV for further use)
"""
sd = SectionActivationDistribution()
sd.b1_count = b1_all_file_cnt
sd.b1_b_truth_count = b1b_all_truth_cnt
sd.b1_m_truth_count = b1m_all_truth_cnt
sd = start_ati_process(args, fold_index, partition_count, sd)
trend, common_trend, support = save_activation_trend(sd)
# select sections for Tier-2 based on identified activation trend
q_sections_by_q_criteria = find_qualified_sections(sd, trend, common_trend, support, fold_index)
# select, drop = plots.save_stats_as_plot(fmaps, qualification_criteria)
# Save qualified sections by Q_criteria
qdata = [np.concatenate([[str(q_criterion)], q_sections_by_q_criteria[q_criterion]]) for q_criterion in q_sections_by_q_criteria]
    pd.DataFrame(qdata)
import gc
import itertools
import multiprocessing
import time
from collections import Counter
import numpy as np
import pandas as pd
def create_customer_feature_set(train):
customer_feats = pd.DataFrame()
customer_feats['customer_id'] = train.customer_id
customer_feats['customer_max_ratio'] = train.customer_id / \
np.max(train.customer_id)
customer_feats['index_max_ratio'] = train.customer_id / \
(train.index + 1e-14)
customer_feats['customer_count'] = train.customer_id.map(
train.customer_id.value_counts())
customer_feats['cust_first'] = train.customer_id.apply(
lambda x: int(str(x)[:1]))
customer_feats['cust_2first'] = train.customer_id.apply(
lambda x: int(str(x)[:2]))
customer_feats['cust_3first'] = train.customer_id.apply(
lambda x: int(str(x)[:3]))
customer_feats['cust_4first'] = train.customer_id.apply(
lambda x: int(str(x)[:4]))
customer_feats['cust_6first'] = train.customer_id.apply(
lambda x: int(str(x)[:6]))
# customer_feats.cust_3first = pd.factorize(customer_feats.cust_3first)[0]
customer_feats.drop(['customer_id'], axis=1, inplace=True)
return customer_feats
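# Hypothetical usage sketch (assumes `train` carries a numeric `customer_id` column, as required above):
#   customer_feats = create_customer_feature_set(train)
#   train = pd.concat([train.reset_index(drop=True), customer_feats], axis=1)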
def create_groupings_feature_set(data, features, transform=True):
df_features = pd.DataFrame()
year_mean = group_feat_by_feat(
data, 'year', features, 'mean', transform)
year_max = group_feat_by_feat(
data, 'year', features, 'max', transform)
year_count = group_feat_by_feat(
data, 'year', features, 'count', transform)
month_mean = group_feat_by_feat(
data, 'month', features, 'mean', transform)
month_max = group_feat_by_feat(
data, 'month', features, 'max', transform)
month_count = group_feat_by_feat(
data, 'month', features, 'count', transform)
market_mean = group_feat_by_feat(
data, 'market', features, 'mean', transform)
market_max = group_feat_by_feat(
data, 'market', features, 'max', transform)
market_count = group_feat_by_feat(
data, 'market', features, 'count', transform)
customer_mean = group_feat_by_feat(
data, 'customer_id', features, 'mean', transform)
customer_max = group_feat_by_feat(
data, 'customer_id', features, 'max', transform)
customer_count = group_feat_by_feat(
data, 'customer_id', features, 'count', transform)
df_features = pd.concat([year_mean, year_max, year_count, month_mean, month_max, month_count,
market_mean, market_max, market_count,
customer_mean, customer_max, customer_count], axis=1)
del year_mean, year_max, year_count, month_mean, month_max, month_count, \
market_mean, market_max, market_count, \
customer_mean, customer_max, customer_count
gc.collect()
return df_features
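# `group_feat_by_feat` is referenced above but is not defined in this excerpt. Below is a
# minimal, hypothetical stand-in (the original helper may differ): it aggregates `features`
# per level of `group_col` and, when `transform=True`, broadcasts the result back onto the
# original rows so it can be concatenated column-wise with the source frame.
def group_feat_by_feat(data, group_col, features, agg_func='mean', transform=True):
    grouped = data.groupby(group_col)[features]
    result = grouped.transform(agg_func) if transform else grouped.agg(agg_func)
    result.columns = ['{}_by_{}_{}'.format(col, group_col, agg_func) for col in result.columns]
    return result.reset_index(drop=True)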
def create_aggregated_lags(df, current_month,
only_target=False, features=None, agg_func='mean',
month_merge=False):
assert(current_month > 0 and current_month < 16)
if month_merge:
df_result = df[df.date == current_month][['customer_id']]
else:
df_result = df[['customer_id']]
print('Creating grouping features based on aggregated data before {} date.'.format(
current_month))
print('Beginning shape:', df_result.shape)
if features is not None:
if 'customer_id' not in features:
features.append('customer_id')
df_lag = df[df.date < current_month]
if only_target:
df_lag = df_lag[['customer_id', 'target']].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
else:
if features is not None:
df_lag = df_lag[features].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
df_lag.columns = ['{}_lag_agg'.format(
x) if 'customer' not in x else x for x in df_lag.columns]
df_result = df_result.merge(
df_lag, on=['customer_id'], how='left', copy=False)
to_drop = [x for x in df_result.columns if 'customer' in x]
df_result.drop(to_drop, axis=1, inplace=True)
print('Final shape:', df_result.shape)
return df_result
def create_lag_features(df, current_month=1, start_lag=0, incremental=False,
only_target=False, features=None, agg_func='mean',
month_merge=False):
if month_merge:
df_result = df[df.date == current_month][['customer_id', 'target']]
else:
df_result = df[['customer_id', 'target']]
lag_subset = np.arange(start_lag, current_month, 1)
print('Beginning shape:', df_result.shape, 'Lag subset:', lag_subset)
if features is not None:
if 'customer_id' not in features:
features.append('customer_id')
if incremental:
print('Creating grouping features based on incremental lags.')
if not incremental:
print('Creating grouping features based on non-incremental lags.')
print('For non-incremental lags only mean aggregation can be used - switch to it.')
agg_func = 'mean'
for i in range(len(lag_subset)):
if incremental:
print('Dates subset:', lag_subset[lag_subset <= lag_subset[i]])
df_lag = df[df.date <= lag_subset[i]]
else:
df_lag = df[df.date == lag_subset[i]]
if only_target:
df_lag = df_lag[['customer_id', 'target']].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
else:
if features is not None:
df_lag = df_lag[features].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
df_lag.columns = ['{}_lag{}'.format(
x, i) if 'customer' not in x else x for x in df_lag.columns]
df_result = df_result.merge(
df_lag, on=['customer_id'], how='left', copy=False)
to_drop = [x for x in df_result.columns if 'customer' in x]
to_drop.append('target')
df_result.drop(to_drop, axis=1, inplace=True)
print('Final shape:', df_result.shape)
return df_result
def prepare_lags_data(train, test,
start_train=1, end_train=11,
start_test=12, end_test=15,
only_target=False, features=None,
incremental=False, agg_func='mean'):
df_train = pd.DataFrame()
df_test = pd.DataFrame()
print('Create training set.\n')
for i in range(start_train, end_train + 1, 1):
if incremental:
lag_features = create_lag_features(
train, i, start_train, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
else:
lag_features = create_lag_features(
train, i, i - 1, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
df_train = pd.concat([df_train, lag_features])
print('Current train shape:', df_train.shape)
print('\nCreate test set.\n')
for i in range(start_test, end_test + 1, 1):
if incremental:
lag_features = create_lag_features(
test, i, start_test, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
else:
lag_features = create_lag_features(
test, i, i - 1, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
df_test = pd.concat([df_test, lag_features])
print('Current test shape:', df_test.shape)
print('Final shapes:', df_train.shape, df_test.shape)
df_train.drop(['target'], axis=1, inplace=True)
df_train.reset_index(inplace=True, drop=True)
df_test.drop(['target'], axis=1, inplace=True)
df_test.reset_index(inplace=True, drop=True)
return df_train, df_test
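# Hypothetical usage sketch (date ranges follow the function defaults; feature names are placeholders):
#   lag_train, lag_test = prepare_lags_data(train, test,
#                                           start_train=1, end_train=11,
#                                           start_test=12, end_test=15,
#                                           only_target=False, features=['customer_id', 'target'],
#                                           incremental=True, agg_func='mean')
#   train_full = pd.concat([train.reset_index(drop=True), lag_train], axis=1)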
def prepare_aggregated_lags(train, test,
start_train=0, end_train=11,
start_test=12, end_test=15,
only_target=False,
features=None, agg_func='mean'):
df_train = pd.DataFrame()
df_test = pd.DataFrame()
print('Create training set.\n')
for i in range(start_train, end_train + 1, 1):
lag_features = create_aggregated_lags(
train, i,
only_target=only_target, features=features, agg_func=agg_func)
        df_train = pd.concat([df_train, lag_features])
__all__ = [
"eval_df",
"ev_df",
"eval_nominal",
"ev_nominal",
"eval_grad_fd",
"ev_grad_fd",
"eval_conservative",
"ev_conservative",
]
import itertools
import grama as gr
from grama import add_pipe, pipe
from numpy import ones, eye, tile, atleast_2d
from pandas import DataFrame, concat
from toolz import curry
## Default evaluation function
# --------------------------------------------------
@curry
def eval_df(model, df=None, append=True, verbose=True):
r"""Evaluate model at given values
Evaluates a given model at a given dataframe.
Args:
model (gr.Model): Model to evaluate
df (DataFrame): Input dataframe to evaluate
append (bool): Append results to original dataframe?
Returns:
DataFrame: Results of model evaluation
Examples:
>>> import grama as gr
>>> from grama.models import make_test
>>> md = make_test()
>>> df = gr.df_make(x0=0, x1=1, x2=2)
>>> md >> gr.ev_df(df=df)
"""
if df is None:
raise ValueError("No input df given")
if len(model.functions) == 0:
raise ValueError("Given model has no functions")
out_intersect = set(df.columns).intersection(model.out)
if (len(out_intersect) > 0) and verbose:
print(
"... provided columns intersect model output.\n"
+ "eval_df() is dropping {}".format(out_intersect)
)
df_res = model.evaluate_df(df)
if append:
df_res = concat(
[
df.reset_index(drop=True).drop(model.out, axis=1, errors="ignore"),
df_res,
],
axis=1,
)
return df_res
ev_df = add_pipe(eval_df)
## Nominal evaluation
# --------------------------------------------------
@curry
def eval_nominal(model, df_det=None, append=True, skip=False):
r"""Evaluate model at nominal values
Evaluates a given model at a model nominal conditions (median).
Args:
model (gr.Model): Model to evaluate
df_det (DataFrame): Deterministic levels for evaluation; use "nom"
for nominal deterministic levels.
append (bool): Append results to nominal inputs?
skip (bool): Skip evaluation of the functions?
Returns:
DataFrame: Results of nominal model evaluation or unevaluated design
Examples:
>>> import grama as gr
>>> from grama.models import make_test
>>> md = make_test()
>>> md >> gr.ev_nominal(df_det="nom")
"""
## Draw from underlying gaussian
quantiles = ones((1, model.n_var_rand)) * 0.5 # Median
## Convert samples to desired marginals
df_pr = DataFrame(data=quantiles, columns=model.var_rand)
df_rand = model.density.pr2sample(df_pr)
## Construct outer-product DOE
df_samp = model.var_outer(df_rand, df_det=df_det)
if skip:
return df_samp
return eval_df(model, df=df_samp, append=append)
ev_nominal = add_pipe(eval_nominal)
## Gradient finite-difference evaluation
# --------------------------------------------------
@curry
def eval_grad_fd(model, h=1e-8, df_base=None, var=None, append=True, skip=False):
r"""Finite-difference gradient approximation
Evaluates a given model with a central-difference stencil to approximate the
gradient.
Args:
model (gr.Model): Model to differentiate
h (numeric): finite difference stepsize,
single (scalar): or per-input (array)
df_base (DataFrame): Base-points for gradient calculations
var (list(str) or string): list of variables to differentiate,
or flag; "rand" for var_rand, "det" for var_det
append (bool): Append results to base point inputs?
skip (bool): Skip evaluation of the functions?
Returns:
DataFrame: Gradient approximation or unevaluated design
@pre (not isinstance(h, collections.Sequence)) |
(h.shape[0] == df_base.shape[1])
Examples:
>>> import grama as gr
>>> from grama.models import make_cantilever_beam
>>> md = make_cantilever_beam()
>>> df_nom = md >> gr.ev_nominal(df_det="nom")
>>> df_grad = md >> gr.ev_grad_fd(df_base=df_nom)
>>> df_grad >> gr.tf_gather("var", "val", gr.everything())
"""
## Check invariants
if not set(model.var).issubset(set(df_base.columns)):
raise ValueError("model.var must be subset of df_base.columns")
if var is None:
var = model.var
elif isinstance(var, str):
if var == "rand":
var = model.var_rand
elif var == "det":
var = model.var_det
else:
raise ValueError("var flag not recognized; use 'rand' or 'det'")
else:
if not set(var).issubset(set(model.var)):
raise ValueError("var must be subset of model.var")
var_fix = list(set(model.var).difference(set(var)))
## TODO
    if skip:
        raise NotImplementedError("skip not implemented")
## Build stencil
n_var = len(var)
stencil = eye(n_var) * h
stepscale = tile(atleast_2d(0.5 / h).T, (1, model.n_out))
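    # Central difference: df/dx_i ~= (f(x + h*e_i) - f(x - h*e_i)) / (2*h).
    # Each row of `stencil` perturbs one input by +/- h, and `stepscale`
    # carries the 1/(2*h) factor, tiled across the model outputs.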
outputs = model.out
nested_labels = [
list(map(lambda s_out: "D" + s_out + "_D" + s_var, outputs)) for s_var in var
]
grad_labels = list(itertools.chain.from_iterable(nested_labels))
## Loop over df_base
results = [] # TODO: Preallocate?
for row_i in range(df_base.shape[0]):
## Evaluate
df_left = eval_df(
model,
gr.tran_outer(
DataFrame(
columns=var, data=-stencil + df_base[var].iloc[[row_i]].values
),
df_base[var_fix].iloc[[row_i]],
),
append=False,
)
df_right = eval_df(
model,
gr.tran_outer(
DataFrame(
columns=var, data=+stencil + df_base[var].iloc[[row_i]].values
),
df_base[var_fix].iloc[[row_i]],
),
append=False,
)
## Compute differences
res = (stepscale * (df_right[outputs] - df_left[outputs]).values).flatten()
df_grad = DataFrame(columns=grad_labels, data=[res])
results.append(df_grad)
return concat(results).reset_index(drop=True)
ev_grad_fd = add_pipe(eval_grad_fd)
## Conservative quantile evaluation
# --------------------------------------------------
@curry
def eval_conservative(model, quantiles=None, df_det=None, append=True, skip=False):
r"""Evaluates a given model at conservative input quantiles
Uses model specifications to determine the "conservative" direction
for each input, and evaluates the model at the desired quantile.
Provided primarily for comparing UQ against pseudo-deterministic
design criteria.
Note that if there is no conservative direction for the given input,
the given quantile will be ignored and the median will automatically
be selected.
Args:
model (gr.Model): Model to evaluate
quantiles (numeric): lower quantile value(s) for conservative
evaluation; can be single value for all inputs, array
of values for each random variable, or None for default 0.01.
            Values must lie in [0, 0.5].
df_det (DataFrame): Deterministic levels for evaluation; use "nom"
for nominal deterministic levels.
append (bool): Append results to conservative inputs?
skip (bool): Skip evaluation of the functions?
Returns:
DataFrame: Conservative evaluation or unevaluated design
Examples:
>>> import grama as gr
>>> from grama.models import make_plate_buckle
>>> md = make_plate_buckle()
>>> md >> gr.ev_conservative(df_det="nom")
"""
## Default behavior
if quantiles is None:
print("eval_conservative() using quantile default 0.01;")
print("provide `quantiles` keyword for non-default behavior.")
quantiles = [0.01] * model.n_var_rand
## Handle scalar vs vector quantiles
try:
len(quantiles)
except TypeError:
quantiles = [quantiles] * model.n_var_rand
## Modify quantiles for conservative directions
quantiles = [
0.5 + (0.5 - quantiles[i]) * model.density.marginals[model.var_rand[i]].sign
for i in range(model.n_var_rand)
]
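    # Worked example of the flip above: with the default quantile 0.01, a
    # marginal with sign +1 maps to 0.5 + (0.5 - 0.01) = 0.99, sign -1 maps to
    # 0.5 - (0.5 - 0.01) = 0.01, and sign 0 (no conservative direction) stays
    # at the median 0.5.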
quantiles = atleast_2d(quantiles)
## Draw samples
df_pr = | DataFrame(data=quantiles, columns=model.var_rand) | pandas.DataFrame |
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
with pytest.raises(ValueError, match="y cannot be None"):
encoder.fit(X)
encoder.fit(X, y)
with pytest.raises(ValueError, match="y cannot be None"):
encoder.inverse_transform(None)
def test_label_encoder_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X)
assert_frame_equal(X, X_t)
assert y_t is None
def test_label_encoder_fit_transform_with_numeric_values_does_not_encode():
X = pd.DataFrame({})
# binary
y = pd.Series([0, 1, 1, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
# multiclass
X = pd.DataFrame({})
y = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
def test_label_encoder_fit_transform_with_numeric_values_needs_encoding():
X = pd.DataFrame({})
# binary
y = pd.Series([2, 1, 2, 1])
y_expected = pd.Series([1, 0, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series([0, 1, 1, 3, 0, 3])
y_expected = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_with_categorical_values():
X = pd.DataFrame({})
# binary
y = pd.Series(["b", "a", "b", "b"])
y_expected = pd.Series([1, 0, 1, 1])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series(["c", "a", "b", "c", "d"])
y_expected = pd.Series([2, 0, 1, 2, 3])
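    # The expected values imply the labels are encoded in sorted order
    # (a -> 0, b -> 1, c -> 2, d -> 3); that is an observation about this
    # test's data, not a quote from the encoder's documentation.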
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_equals_fit_and_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder()
X_fit_transformed, y_fit_transformed = encoder.fit_transform(X, y)
encoder_duplicate = LabelEncoder()
encoder_duplicate.fit(X, y)
X_transformed, y_transformed = encoder_duplicate.transform(X, y)
assert_frame_equal(X_fit_transformed, X_transformed)
assert_series_equal(y_fit_transformed, y_transformed)
def test_label_encoder_inverse_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
y_expected = ww.init_series(y)
encoder = LabelEncoder()
_, y_fit_transformed = encoder.fit_transform(X, y)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(y_expected, y_inverse_transformed)
y_encoded = pd.Series([1, 0, 2, 1])
y_expected = ww.init_series(pd.Series(["b", "a", "c", "b"]))
y_inverse_transformed = encoder.inverse_transform(y_encoded)
assert_series_equal(y_expected, y_inverse_transformed)
def test_label_encoder_with_positive_label_multiclass_error():
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder(positive_label="a")
with pytest.raises(
ValueError,
match="positive_label should only be set for binary classification targets",
):
encoder.fit(None, y)
def test_label_encoder_with_positive_label_missing_from_input():
y = pd.Series(["a", "b", "a"])
encoder = LabelEncoder(positive_label="z")
with pytest.raises(
ValueError,
match="positive_label was set to `z` but was not found in the input target data.",
):
encoder.fit(None, y)
@pytest.mark.parametrize(
"y, positive_label, y_encoded_expected",
[
(
pd.Series([True, False, False, True]),
False,
pd.Series([0, 1, 1, 0]),
), # boolean
(
pd.Series([True, False, False, True]),
True,
pd.Series([1, 0, 0, 1]),
), # boolean
(
pd.Series([0, 1, 1, 0]),
0,
pd.Series([1, 0, 0, 1]),
), # int, 0 / 1, encoding should flip
(
pd.Series([0, 1, 1, 0]),
1,
pd.Series([0, 1, 1, 0]),
), # int, 0 / 1, encoding should not change
(
pd.Series([6, 2, 2, 6]),
6,
pd.Series([1, 0, 0, 1]),
), # ints, not 0 / 1, encoding should not change
(
pd.Series([6, 2, 2, 6]),
2,
pd.Series([0, 1, 1, 0]),
), # ints, not 0 / 1, encoding should flip
(pd.Series(["b", "a", "a", "b"]), "a", pd.Series([0, 1, 1, 0])), # categorical
(pd.Series(["b", "a", "a", "b"]), "b", pd.Series([1, 0, 0, 1])), # categorical
],
)
def test_label_encoder_with_positive_label(y, positive_label, y_encoded_expected):
encoder = LabelEncoder(positive_label=positive_label)
_, y_fit_transformed = encoder.fit_transform(None, y)
assert_series_equal(y_encoded_expected, y_fit_transformed)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(ww.init_series(y), y_inverse_transformed)
def test_label_encoder_with_positive_label_fit_different_from_transform():
encoder = LabelEncoder(positive_label="a")
y = pd.Series(["a", "b", "b", "a"])
encoder.fit(None, y)
with pytest.raises(ValueError, match="y contains previously unseen labels"):
encoder.transform(None, pd.Series(["x", "y", "x"]))
@pytest.mark.parametrize("use_positive_label", [True, False])
def test_label_encoder_transform_does_not_have_all_labels(use_positive_label):
encoder = LabelEncoder(positive_label="a" if use_positive_label else None)
y = pd.Series(["a", "b", "b", "a"])
encoder.fit(None, y)
expected = (
| pd.Series([1, 1, 1, 1]) | pandas.Series |
#!/bin/python3
""" Provides analysis about the R-Mappings """
__author__ = 'Loraine'
__version__ = '1.0'
import pandas as pd
from config import path
from scipy.stats import ttest_ind
class FirstExperiment(object):
def __init__(self,filename):
self.df = | pd.read_csv(path + filename) | pandas.read_csv |
#!/usr/bin/python3.7
import pandas as pd
import numpy as np
import os, sys
# Read in files.
critical_0to1000 = | pd.read_csv("Critical.0-1000.txt", skipinitialspace=True, names=['seqnum', 'mutant_position', 'original_class', 'mutant_class', 'original_score', 'mutant_score', 'delta_score'], header=0, delim_whitespace=True) | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
import time
from radon.raw import *
from radon.cli import *
from radon.complexity import *
from radon.metrics import *
import logging
import tqdm
import itertools
from func_timeout import func_timeout, FunctionTimedOut
import signal
import multiprocessing as mp
from multiprocessing_logging import install_mp_handler
import subprocess
import json
import os
# Paths of sample projects
PATH_SAMPLE = Path("../Sample_Projects/round_2/").resolve()
# Paths of .csv files
PATH_CSV = Path("../csv/round_2/").resolve()
# Path of mi files
PATH_MI = Path("{0}/mi/".format(PATH_CSV)).resolve()
# Create the main directory for cloning projects
PATH_MI.mkdir(parents=True, exist_ok=True)
timeOut = 10
def calculateCC(path):
# print(path)
result = subprocess.check_output(['radon', 'cc', '--total-average', path], stderr= subprocess.STDOUT, timeout=timeOut)
# print(result.decode("utf-8"))
result_str = result.decode("utf-8")
if(len(result_str) > 0):
# Split the last line and collect the data
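        # The summary line being parsed looks roughly like
        # "Average complexity: A (3.2)" -- format inferred from the splitting
        # below, not from radon's documentation.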
output = result_str.split("Average complexity: ")[-1].split(" ")
# print(result)
# Rank A - F
cc_rank = output[0]
# print(cc_rank)
# Remove (cc_score) -> cc_score and convert to float value
cc_score = float(output[1].replace("(", "").replace(")", "").strip())
# print(cc_score)
else:
cc_rank = np.NaN
cc_score = np.NaN
# print({'cc_rank': cc_rank, 'cc_score': cc_score})
return {'cc_rank': cc_rank, 'cc_score': cc_score}
def calculateHV(path):
# print(path)
result = subprocess.check_output(['radon', 'hal', '-j', path], stderr= subprocess.STDOUT, timeout=timeOut)
# print(result.decode("utf-8"))
result_str = result.decode("utf-8")
# print(stdout.decode("utf-8"))
# print(stderr)
x = result_str
js = json.loads(x)
hal = js[path]['total']
# print(hal)
# return array h1, h2, N1, N2, vocabulary, length, calculated_length, volume, difficulty, effort, time, bugs
# print({'h1': hal[0], 'h2': hal[1], 'N1': hal[2], 'N2': hal[3], 'vocabulary': hal[4], 'length': hal[5], 'calculated_length': hal[6], 'volume': hal[7], 'difficulty': hal[8], 'effort': hal[9], 'time': hal[10], 'bugs': hal[11]})
return {'h1': hal[0], 'h2': hal[1], 'N1': hal[2], 'N2': hal[3], 'vocabulary': hal[4], 'length': hal[5], 'calculated_length': hal[6], 'volume': hal[7], 'difficulty': hal[8], 'effort': hal[9], 'time': hal[10], 'bugs': hal[11]}
def calculateLOC(path):
result = subprocess.check_output(['radon', 'raw', '-j', path], stderr= subprocess.STDOUT, timeout=timeOut)
# print(result.decode("utf-8"))
result_str = result.decode("utf-8")
# print(stdout.decode("utf-8"))
# print(stderr)
x = result_str
js = json.loads(x)
raw = js[path]
# Return dict {'loc': 259, 'lloc': 151, 'single_comments': 48, 'sloc': 149, 'multi': 7, 'comments': 48, 'blank': 55}
return raw
# def calculateCC(code):
# # Register the signal function handler
# signal.signal(signal.SIGALRM, handler)
# # Define a timeout for your function (10 seconds)
# signal.alarm(10)
# try:
# arr = mi_parameters(code, count_multi=False)
# CC = arr[1]
# # print(mi_visit(txt, False))
# except SyntaxError:
# CC = -1
# finally:
# # Unregister the signal so it won't be triggered
# # if the timeout is not reached.
# signal.signal(signal.SIGALRM, signal.SIG_IGN)
# return CC
# def calculateHV(code, file):
# # Register the signal function handler
# signal.signal(signal.SIGALRM, handler)
# # Define a timeout for your function (10 seconds)
# signal.alarm(10)
# try:
# arr = mi_parameters(code, count_multi=False)
# hv = arr[0]
# except SyntaxError:
# hv = -1
# print(str(file))
# finally:
# # Unregister the signal so it won't be triggered
# # if the timeout is not reached.
# signal.signal(signal.SIGALRM, signal.SIG_IGN)
# return hv
# def calculateLOC(code):
# # Register the signal function handler
# signal.signal(signal.SIGALRM, handler)
# # Define a timeout for your function (10 seconds)
# signal.alarm(10)
# try:
# loc = analyze(code)
# # Module(loc=209, lloc=75, sloc=128, comments=8, multi=36, blank=37, single_comments=8)
# # sloc = loc[2]
# except SyntaxError:
# lst = [-1]
# loc = list(itertools.chain.from_iterable(itertools.repeat(x, 7) for x in lst))
# finally:
# # Unregister the signal so it won't be triggered
# # if the timeout is not reached.
# signal.signal(signal.SIGALRM, signal.SIG_IGN)
# return loc
# def calculatePercentComment(code):
# # Register the signal function handler
# signal.signal(signal.SIGALRM, handler)
# # Define a timeout for your function (10 seconds)
# signal.alarm(10)
# try:
# arr = mi_parameters(code, count_multi=False)
# # Convert percent to radian
# percent = arr[3]*0.06283
# except SyntaxError:
# percent = -1
# finally:
# # Unregister the signal so it won't be triggered
# # if the timeout is not reached.
# signal.signal(signal.SIGALRM, signal.SIG_IGN)
# return percent
def calculateMI(hv, cc, sloc):
    if not (np.isnan(hv) or np.isnan(cc) or np.isnan(sloc)):
real_mi = np.clip(100 * (171-(5.2*np.log(hv+1))-(0.23*cc)-(16.2*np.log(sloc+1)))/171, 0, 100)
# print("MI:")
# print(100 * (171-(5.2*np.log(hv))-(0.23*cc)-(16.2*np.log(sloc)))/171)
# print(real_mi)
return real_mi
else:
return np.NaN
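# Worked example for calculateMI above (arbitrary inputs, rounded): hv=1000,
# cc=10, sloc=200 gives 100 * (171 - 5.2*ln(1001) - 0.23*10 - 16.2*ln(201)) / 171
# ~= 27, which already lies inside the [0, 100] clip range.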
def mergeFile(path_project):
path = Path(path_project)
merge = ""
# Merge all .py files as one file
# Then, input it into radon API(mi_visit) to calculate MI for each project
for file in path_project.rglob('*.py'):
txt = open(str(file)).read()
merge = merge + "\n" + txt
return merge
def handler(signum, frame):
raise Exception("Time Out!")
def evaluate(path_project):
project_id = int(path_project.name)
temp = []
for dirpath, dirs, files in tqdm.tqdm(list(os.walk("{0}".format(path_project))), desc="File Level: {0}".format(project_id)):
for filename in files:
python = os.path.join(dirpath,filename)
# print(python)
if(python.endswith(".py")):
try:
path = "{0}".format(python)
# print(path)
hv = calculateHV(path)
# hv = func_timeout(10, calculateHV, args=code)
cc = calculateCC(path)
# print(cc)
# # cc = func_timeout(10, calculateCC, args=code)
raw = calculateLOC(path)
# # all_loc = func_timeout(10, calculateLOC, args=code)
real_mi = calculateMI(hv['volume'], cc['cc_score'], raw['sloc'])
d = {'project_id': path_project.name}
d.update(hv)
d.update(cc)
d.update(raw)
d.update({'mi': real_mi})
d.update({'path': path})
temp.append(d)
# logging.info("project_id: {0}, path: {1}".format(project_id, file))
# print(d)
except OSError as o:
logging.error("[{0}], project_id: {1}, path: {2}".format(o, project_id, python))
except Exception as e:
logging.error("[{0}], project_id: {1}, path: {2}".format(e, project_id, python))
# print(temp)
df = pd.DataFrame.from_dict(temp)
# print(df)
df.to_csv("{0}/{1}.csv".format(PATH_MI, project_id), index=False)
# print("########### Finish Calculating MI project: {0} ############".format(project_id))
return df
def dispatch_jobs(func, data):
# Get the number of CPU cores
numberOfCores = mp.cpu_count()
# print(numberOfCores)
# Data split by number of cores
# data_split = np.array_split(data, numberOfCores, axis=0)
# print(type(data_split[0]))
# set up logging to file
logging.basicConfig(filename='mi.log', filemode='w', level=logging.ERROR)
install_mp_handler()
with mp.Pool(processes=numberOfCores) as pool:
pool.map(func, data)
pool.close()
pool.join()
# result.to_csv("{0}/{1}".format(PATH_CSV, "mi_original.csv"), index=False)
print("########### Dispatch jobs Finished ############")
def contain(project_id, df):
flag = False
for id in df['project_id']:
if(id == project_id):
# print(path_project.name)
flag = True
return flag
def rerun():
rerun = pd.read_csv("/home/senior/senior/src/mi_rerun.csv")
for path_project in tqdm.tqdm(list(PATH_SAMPLE.iterdir()), desc="Project Level", total=len(rerun)):
project_id = int(path_project.name)
temp = []
if(contain(project_id, rerun)):
for dirpath, dirs, files in os.walk("{0}".format(path_project)):
for filename in tqdm.tqdm(list(files), desc="File Level: {0}".format(project_id)):
python = os.path.join(dirpath,filename)
# print(python)
if(python.endswith(".py")):
try:
path = "{0}".format(python)
# print(path)
hv = calculateHV(path)
# hv = func_timeout(10, calculateHV, args=code)
cc = calculateCC(path)
# print(cc)
# # cc = func_timeout(10, calculateCC, args=code)
raw = calculateLOC(path)
# # all_loc = func_timeout(10, calculateLOC, args=code)
real_mi = calculateMI(hv['volume'], cc['cc_score'], raw['sloc'])
d = {'project_id': path_project.name}
d.update(hv)
d.update(cc)
d.update(raw)
d.update({'mi': real_mi})
d.update({'path': path})
temp.append(d)
# logging.info("project_id: {0}, path: {1}".format(project_id, file))
# print(d)
                        except OSError as os_err:
                            logging.error("[{0}], project_id: {1}, path: {2}".format(os_err, project_id, python))
except Exception as e:
logging.error("[{0}], project_id: {1}, path: {2}".format(e, project_id, python))
# print(temp)
df = | pd.DataFrame.from_dict(temp) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Division By Zero
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([ | pd.Timedelta(days=0) | pandas.Timedelta |
#%%
import json
from itertools import chain, cycle
import numpy as np
import pandas as pd
# from pandas import json_normalize
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook')
import sys
import os
# %%
## Connect to Postgres database with data and DBT error views
import psycopg2 as pg
import yaml
from pathlib import Path
with open(Path.home() / ".dbt/profiles.yml") as f:
db_config = yaml.load(f,Loader=yaml.FullLoader)
db_credentials = db_config['default']['outputs']['dev']
conn = pg.connect(host=db_credentials['host'], user=db_credentials['user'],
password=db_credentials['<PASSWORD>'], port=db_credentials['port'],
dbname=db_credentials['dbname'] )
# %%
## Load and parse DBT error results
with open('dbt/target/run_results_test.json') as f:
run_results = json.load(f)
dbt_results = json_normalize(run_results['results'])
# Get rid of verbose columns we don't need
dbt_results.drop(list(dbt_results.filter(regex = '_sql')), axis = 1, inplace = True)
dbt_results.drop(list(dbt_results.filter(regex = '_path')), axis = 1, inplace = True)
dbt_results.rename(columns={'node.alias': 'test_name'}, inplace=True)
# Unravel nested list of refs and add tag columns
dbt_results['referenced_tables'] = dbt_results['node.refs'].apply(chain.from_iterable).apply(list)
# Separate out the tests that have successfully created views from those that failed
errors = dbt_results[dbt_results['status']=='ERROR'][['test_name','error']]
successes = dbt_results[dbt_results['status']=='CREATE VIEW'][['test_name', 'referenced_tables']]
# %%
# Copy the views from the database created by the test runs to local dataframes
# Some views don't have UUIDS so mark those as wrong
test_dfs = {}
wrong_outputs = []
for s in successes['test_name']:
test_table = pd.read_sql(f'SELECT * FROM analytics.{s}',conn)
if not any(col in test_table for col in ['uuid','from_uuid','chv_uuid','chu_uuid']):
# print(f'{s} does not contain uuid')
wrong_outputs.append(s)
else:
        test_dfs[s] = test_table  # reuse the frame already fetched above
# %%
from ast import literal_eval
ge_results = pd.read_csv('generated_reports/ge_clean_results.csv')
ge_results.rename(columns={'result.short_name':'test_name'}, inplace=True)
ge_results['referenced_tables'] = ge_results['result.table'].apply(lambda x : [x])
ge_successes = ge_results[~ ge_results['error']][['test_name', 'referenced_tables']]
ge_errors = ge_results[~ ge_results['error']][['test_name','error']]
for test in ge_successes['test_name']:
test_row = ge_results[ge_results['test_name'] == test]
test_column = test_row['result.id_column'].values[0]
error_value = test_row['result.unexpected_list'].apply(literal_eval).values[0]
if test_column in ['uuid','from_uuid','chv_uuid','chu_uuid']:
test_dfs[test] = pd.DataFrame(error_value, columns=[test_column])
else:
wrong_outputs.append(test)
# combine GE results with DBT results
successes = successes.append(ge_successes, ignore_index=True)
errors = errors.append(ge_errors, ignore_index=True)
successes = successes[~successes['test_name'].isin(wrong_outputs)]
wrong = pd.DataFrame(wrong_outputs, columns=['test_name'])
wrong['error'] = 'Output view does not contain UUID'
errors = errors.append(wrong, ignore_index=True)
#%%
tables = ['patient', 'assessment', 'assessment_follow_up', 'pregnancy',
'pregnancy_follow_up', 'postnatal_follow_up', 'chv', 'supervisor',
'household','delivery','immunization']
tags = ['tag_' + r for r in tables]
for i in tables:
successes['tag_' + i] = successes.apply(lambda x: int(i in x['referenced_tables']), axis=1)
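# e.g. a (hypothetical) test whose referenced_tables is ['patient', 'assessment']
# ends up with tag_patient = 1, tag_assessment = 1 and 0 in every other tag_* column.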
# %%
ref_dfs = {}
for t in tables:
ref_dfs[t] = pd.read_sql(f'SELECT * FROM {t}',conn)
# Iterating through each table in the database, pull out the relevant tests and put stats in a dataframe
OUTPUT_DIR = './generated_reports/'
table_outputs = []
test_outputs = []
for table in tables:
# print(table)
tests = successes[successes['tag_'+table]==1]['test_name'].to_list()
if len(tests) == 0:
continue
dfs = [test_dfs[t] for t in tests]
ref = ref_dfs[table]
if table == 'chv':
uuid_key = '<KEY>' #chv table has a different naming convention
elif table == 'supervisor':
uuid_key = '<KEY>' #supervisor table too!
else:
uuid_key = 'uuid'
if 'reported' in ref:
## for tables that have a time associated with entries (not chv or supervisor)
## group by time and plot.
ref['reported'] = pd.to_datetime(ref['reported'])
plot_data = []
for t,d in zip(tests, dfs):
if 'reported' in d: # For tests on one table we have all the data we need
subset = d
subset['reported'] = pd.to_datetime(subset['reported'])
elif uuid_key in d:
subset = pd.merge(ref,d,on=uuid_key,how='inner',suffixes=('','_'))
elif ('from_uuid' in d) or ('to_id' in d):
referenced_tables = successes[successes['test_name']==t]['referenced_tables']
key = 'from_uuid' if referenced_tables.iloc[0][0] == table else 'to_id'
subset = pd.merge(ref,d,left_on=uuid_key,right_on=key,how='inner',suffixes=('','_'))
subset_resamp = subset[subset['reported'].dt.year>=2017].groupby(pd.Grouper(key='reported',freq='1W'))[uuid_key].count()
if subset.shape[0] != 0:
plot_data.append((t, subset_resamp) )
n_plots = len(plot_data)+1
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = cycle(prop_cycle.by_key()['color'])
fig, ax = plt.subplots(n_plots, 1, figsize=(10,n_plots*3),sharex=False)
ref_resamp = ref[ref['reported'].dt.year>=2017].groupby(pd.Grouper(key='reported',freq='1W'))[uuid_key].count()
ref_resamp.plot(ax=ax[0], label=table, color=next(colors))
ax[0].legend(loc='upper left')
ax[0].set_title(table, fontsize=18)
for data,a in zip(plot_data,ax[1:]):
t, resamp = data
(resamp/ref_resamp).plot(ax=a, color=next(colors))
a.set_title(t)
a.set_ylabel('Fraction IoP')
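            # Each panel therefore plots errors-per-week divided by total
            # rows-per-week for this table, i.e. the weekly fraction of rows
            # flagged by that test.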
plt.tight_layout()
plt.savefig(OUTPUT_DIR + table +'.png')
test_output = []
all_errors = []
for t,d in zip(tests, dfs):
if ('from_uuid' in d) or ('to_id' in d):
referenced_tables = successes[successes['test_name']==t]['referenced_tables']
key = 'from_uuid' if referenced_tables.iloc[0][0] == table else 'to_id'
else:
key = uuid_key
row_errors = d[key].dropna().values.tolist()
all_errors += row_errors
test_output.append({
'Test': t,
'Table': table,
'Errors': len(row_errors),
'% Error': len(row_errors)/ref.shape[0]*100
})
test_outputs += test_output
unique_errors = pd.Series(all_errors).nunique()
table_output = {}
table_output['Table'] = table
table_output['Unique Rows with Error'] = unique_errors
table_output['Total Rows'] = ref.shape[0]
table_output['% Error'] = table_output['Unique Rows with Error']/table_output['Total Rows']*100
table_outputs.append(table_output)
table_outputs = pd.DataFrame(table_outputs)
test_outputs = | pd.DataFrame(test_outputs) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
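    # 0b00101101 sets bits 0, 2, 3 and 5 (LSB first), so rows 0, 2, 3, 5 are
    # valid and rows 1 and 4 are null -- hence null_count == 2 and the
    # validids list below.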
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
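# partition_by_hash hashes the key columns and routes each row to one of
# `nparts` output frames, so a given key value can appear in at most one
# partition.  Illustrative sketch:
#   parts = cudf.DataFrame({"key": [1, 2, 1]}).partition_by_hash(["key"], nparts=2)
#   # both rows with key == 1 end up in the same element of `parts`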
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # When converting to pandas, nulls in non-float numerical columns would be
    # cast to 0 instead of NaN, so normalize `got` to float64 with NaN before
    # comparing.
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
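# Assigning a scalar to a new column broadcasts it to the length of the frame
# while preserving the scalar's dtype, e.g. (illustrative):
#   gdf["b"] = np.float32(1.5)   # len(gdf["b"]) == len(gdf), dtype float32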
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
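    # Shift the range(10) fixture by 1.0 so floordiv, truediv and mod never
    # divide by zero and the operands are floats.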
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name is salted per process, which can
    # occasionally make enc_with_name_arr and enc_arr identical.  Using an
    # integer name yields a constant hash, so the two encodings reliably
    # differ.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
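# cudf.Series and column assignment accept objects exposing
# __cuda_array_interface__ (a CuPy array here), so device data can be used
# directly without first converting to a host array.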
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
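    # Indexing a DataFrame with a boolean DataFrame follows pandas semantics:
    # entries where the mask is False or missing become null, hence the
    # fillna(-1) before comparing values at the end of this test.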
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
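# Series.digitize mirrors np.digitize: it returns, for each value, the index of
# the bin the value falls into.  Illustrative: with bins [10, 20] and
# right=False, the values [5, 15, 25] map to indices [0, 1, 2].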
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas series explicitly because a list of None values would
    # otherwise produce an `object` dtype.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
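# head(n) and tail(n) are expected to match plain positional slices, including
# the negative-n forms (head(-2) == [:-2], tail(-2) == [2:]); the helpers below
# compare both index and values because empty results still carry an index.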
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
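# The test_dataframe_reindex_* cases below exercise every spelling of reindex
# (positional labels, labels= with axis=, index=/columns=) against pandas; the
# `copy` flag should not change the resulting values, which is why the pandas
# side is always called with copy=True.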
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only gained `ignore_index` in sort_index with v1.0, so compute the
    # expected result without it and reset the index manually further below.
    expected = pdf.sort_index(
        axis=axis,
        level=level,
        ascending=ascending,
        inplace=inplace,
        na_position=na_position,
    )
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
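# shift/diff are compared against pandas.  int8 data is drawn from a narrow
# range so the shifted/differenced values cannot overflow, and shift results
# are fillna(0) + cast back to the original dtype because pandas upcasts
# integer columns to float when the shift introduces missing values.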
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        # per the comment above, the intent appears to be verifying that the
        # existing null mask survives rounding, so assert the masks match
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason, so
    # we replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
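    # the string form is expected to preserve the source resolution
    # (nine fractional digits for ns, six for us, three for ms, none for s)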
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
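    # value_counts does not guarantee the same row ordering as pandas, so both
    # results are sorted by index (and dtypes are not compared) below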
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
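    # columns "c" and "d" are entirely null, so with skipna=False every
    # row-wise reduction is expected to be null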
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
    # pandas insert doesn't support negative indexing, so the pandas call
    # appends at the end explicitly; cudf is expected to treat -1 the same way
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
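    # nullable=True converts to pandas nullable (extension) dtypes so nulls
    # are preserved, rather than becoming NaN, for the comparisons below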
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
        # Special Case: a RangeIndex is assumed to report zero memory usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
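    # a categorical column's memory is the size of its dictionary (categories)
    # plus the size of its integer codes, which is what is summed below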
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
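    # B (int64) and C (float64) contribute 8 bytes per row each; the codes are
    # assumed to be 8-byte integers; each level stores its 3 unique 8-byte values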
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so both results are cast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
        expected = cudf.DataFrame(
            {
                "a": column.as_column(data, nan_as_null=nan_as_null),
                "b": column.as_column(data, nan_as_null=nan_as_null),
            }
        )
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # as we currently don't support columns with the same name
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
| pd.Series([], name="abc", dtype="float64") | pandas.Series |
import re
import fnmatch
import os, sys, time
import pickle, uuid
from platform import uname
import pandas as pd
import numpy as np
import datetime
from math import sqrt
from datetime import datetime
import missingno as msno
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa import seasonal
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from scipy import signal
import pmdarima as pm
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.patches as mpatches
from pandas.plotting import lag_plot
import seaborn as sns
from pylab import rcParams
from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, LabelEncoder, OneHotEncoder
from sklearn.impute import KNNImputer
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error
# import keras
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import LSTM
# from keras.layers import Dropout
# from keras.layers import *
# from keras.callbacks import EarlyStopping
from src.Config import Config
from .Model import Model
class Logger(object):
info = print
critical = print
error = print
class Train(Config):
REGRESSION_ALGORITHMS = dict(
# # Supervised Learning
        XGBR = dict(alg=XGBRegressor, args=dict(silent=1, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"], objective="reg:squarederror"), scaled=False),
        LGBMR = dict(alg=LGBMRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False),
        RFR = dict(alg=RandomForestRegressor, args=dict(n_estimators=100, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False),
        RFR_tuned = dict(alg=RandomForestRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False,
param_grid={
'n_estimators': [20, 50, 100, 200, 500], #100
'max_depth':[2, 4, None, 8], #None
'min_samples_split':[0.5, 2, 4, 10], #2
'max_features':[1, 2, None, 3, 6], #auto
},
),
        XGBR_tuned = dict(alg=XGBRegressor, args=dict(silent=1, random_state=Config.MODELLING_CONFIG["RANDOM_STATE"], objective="reg:squarederror"), scaled=False,
param_grid={
'learning_rate':[0.01, 0.05, 0.1, 0.3],#, 0.5, 0.9],
'max_depth': [2, 3, 6, 10, 13], #3
'n_estimators': [20, 50, 200],#, 500], #100
#'booster': ['gbtree', 'dart'], #'gbtree'
'colsample_bytree': [0.2, 0.5, 0.8, 1.0],
'subsample': [0.2, 0.5, 0.8, 1.0],
# 'early_stopping_rounds': [200],
},
),
        LGBMR_tuned = dict(alg=LGBMRegressor, args=dict(random_state=Config.MODELLING_CONFIG["RANDOM_STATE"]), scaled=False,
param_grid={
'learning_rate':[0.01, 0.05, 0.1, 0.3, 0.9], #0.1
'max_depth':[2, 3, 6],
'n_estimators': [20, 50, 100, 200], #100
'num_leaves': [4, 8, 64], #31
# 'subsample': [0.2, 0.5, 0.8, 1.0],
# 'bagging_fraction': [0.2, 0.5, 0.8, 1.0],
# 'early_stopping_rounds': [200]
#'boosting' : ['gbdt', 'dart', 'goss'],
},
),
)
SARIMA = pm.auto_arima
FORECAST_ALGORITHMS = dict(
# # Forecasting
ARIMA = dict(alg=ARIMA, args=dict(order=(Config.MODELLING_CONFIG['ARIMA_P'], Config.MODELLING_CONFIG['ARIMA_D'], Config.MODELLING_CONFIG['ARIMA_Q']))),
SARIMA = dict(alg=SARIMA, args=dict(start_p=1, d=0, start_q=1, max_p=5, max_d=2, max_q=5, m=7,
start_P=0, D=0, start_Q=0, max_P=5, max_D=2, max_Q=5,
seasonal=True, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True)),
HOLT_WINTER = dict(alg=ExponentialSmoothing, args=dict(seasonal_periods=Config.MODELLING_CONFIG["HOLT_WINTER_SEASON"], trend=Config.MODELLING_CONFIG["HOLT_WINTER_TREND"], seasonal=Config.MODELLING_CONFIG["HOLT_WINTER_SEASONAL"])),
)
def __init__(self, var, logger=Logger(), suffix=""):
self.logger = logger
self.models = {}
self.axis_limit = [1e10, 0]
self.suffix = suffix
self.meta = dict(
var = var,
stime = datetime.now(),
user = os.getenv('LOGNAME') or os.getlogin(),
sys = uname()[1],
py = '.'.join(map(str, sys.version_info[:3])),
)
@staticmethod
def vars(types=[], wc_vars=[], qreturn_dict=False):
""" Return list of variable names
        Acquire the right features from the dataframe to be input into the model.
        Features will be acquired based on the value "predictive" in the VARS dictionary.
        Parameters
        ----------
        types : list of str
            Feature group names (keys of Config.VARS) to select variables from
        wc_vars : list of str, optional
            If provided, wildcard-match the selected variable patterns against these column names
        qreturn_dict : boolean (default=False)
            If True, return the full variable definition dictionaries instead of just the names
        Returns
        -------
        Variables with predictive == True in Config.VARS
"""
if types==None:
types = [V for V in Config.VARS]
selected_vars = []
for t in types:
for d in Config.VARS[t]:
if not d.get('predictive'):
continue
if len(wc_vars) != 0:
matched_vars = fnmatch.filter(wc_vars, d['var'])
if qreturn_dict:
for v in matched_vars:
dd = d.copy()
dd['var'] = v
selected_vars.append(dd)
else:
selected_vars.extend(matched_vars)
else:
if qreturn_dict and not d in selected_vars:
selected_vars.append(d)
else:
if not d['var'] in selected_vars:
selected_vars.append(d['var'])
return selected_vars
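    # Illustrative sketch (not part of the original pipeline) of how ``vars`` filters features;
    # the group and variable names below are assumptions, not the project's real Config.VARS:
    #
    #   Config.VARS = {"Sales": [{"var": "Prod_Sales", "predictive": False},
    #                            {"var": "Outlet_*", "predictive": True, "impute": "interp"}]}
    #   Train.vars(["Sales"])                                      -> ["Outlet_*"]
    #   Train.vars(["Sales"], wc_vars=["Outlet_A", "Outlet_B"])    -> ["Outlet_A", "Outlet_B"]
    #   Train.vars(["Sales"], wc_vars=["Outlet_A"], qreturn_dict=True)
    #       -> [{"var": "Outlet_A", "predictive": True, "impute": "interp"}]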
def read_csv_file(self, vars, fname=None, feature_engineer=Config.MODELLING_CONFIG["FEATURE_ENGINEERING"], **args):
"""Read in csv files
Read in csv files from multiple data sources
Parameters
----------
source_type : str
Option to decide whether to read in single or multiple csv files
fname : str (default=None)
Name of csv file:
- If "source_type" = "single", key in the csv file name without extension
- If "source_type" = "multiple", do not need to key in anything, just leave it in default
Returns
-------
data : object
Dataframe
"""
self.meta['feature_engineer'] = feature_engineer
self.logger.info("Preparing data for modeling ...")
self.sources = [vars]
if self.meta['feature_engineer'] == True:
self.sources.append("Feature_Engineer")
elif self.meta['feature_engineer'] == False:
self.sources = self.sources
try:
fname = "{}.csv".format(fname)
self.data = pd.read_csv(os.path.join(Config.FILES["DATA_LOCAL"], fname))
cols = self.vars(self.sources, self.data.columns)
self.data = self.data[cols + ["Date", "District", "Prod_Sales"]]
if self.data.size == 0:
self.logger.warning("no data found in file {}".format(fname))
if self.logger == print:
exit()
except FileNotFoundError:
self.logger.critical("file {} is not found".format(fname))
if self.logger == print:
exit()
fname = os.path.join(self.FILES["DATA_LOCAL"], "{}{}.csv".format(Config.FILES["MERGED_DATA"], self.suffix))
self.data.to_csv(fname)
self.logger.info("done.")
return
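    # Minimal usage sketch; the variable group "Sales" and the file name
    # "merged_district_data" are hypothetical placeholders, not shipped assets:
    #
    #   train = Train(var="Prod_Sales")
    #   train.read_csv_file(vars="Sales", fname="merged_district_data")
    #   train.data.head()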
def run(self, algorithms=['ARIMA'], district=None, metric_eval="test", cv_type="loo", model_type=Config.MODELLING_CONFIG["MODEL_TYPE"]):
"""Initiate the modelling
Set the arguments in running the model.
Most of the model testing can be configure in this method
Parameters
----------
algorithms : array-like, (default='LGBMR')
Models to run;
Models to run include hyperparameters set and any tuning can be adjusted in the algorithms.
district : str
District with product sales on either BIODIESEL or PRIMAX-95
metric_eval : str, optional (default='test')
To determine whether to run cross-validation on per-district model;
- If "test", no cross validation will be performed for per-district model.
- If "cv", cross validation will be performed for per-district model.
cv_type : str, optional (default='loo')
Type of cross validation method to used in modelling;
- If "loo", Leave One Out cross validation will be performed for per-district model.
- If "kf", K-Fold cross validation will be performed for per-district model.
Returns
-------
Model results with best algorithm, metrics and saved model file in pickle format
"""
self.data.reset_index(inplace=True)
self.data[["Date"]] = pd.to_datetime(self.data["Date"])
assert metric_eval in self.MODELLING_CONFIG["METRIC_EVAL_TYPE"]
assert cv_type in self.MODELLING_CONFIG["CV_FOLD_TYPE"]
self.metric_eval = metric_eval
self.cv_type = cv_type
self.meta['metric_eval'] = metric_eval
self.meta['cv_type'] = cv_type
self.meta['SPLIT_RATIO'] = self.MODELLING_CONFIG["SPLIT_RATIO"]
self.meta["model_type"] = self.MODELLING_CONFIG["MODEL_TYPE"]
if district == None:
district = self.data["District"].unique()
self.data = self.data[self.data["District"].isin(district)]
if self.meta["model_type"] == "Forecasting":
self.forecasting(self.data, algorithms)
elif self.meta["model_type"] == "Supervised":
self.regression(self.data, algorithms)
self.sort_models()
self.get_results()
self.meta['runtime'] = datetime.now() - self.meta['stime']
self.meta['algorithms'] = algorithms
self.logger.info("Training finished in {}.".format(self.meta['runtime']))
def regression(self, data, algorithms, column_name="District"):
"""Run the regression
Run the regression model on each clustering_type defined with different models.
Parameters
----------
data : str
Merged dataframe
algorithms : str
Types of models
column_name : str, optional (default='District')
Unique column to used to subset the dataframe
"""
self.logger.info("Training using regression algorithms with evaluation type on '{}':".format(self.meta['metric_eval']))
# # loop over algorithms in supervised learning
for algorithm in algorithms:
start = time.time()
self.logger.info(" Training using regression algorithm {} ...".format(algorithm))
# # loop over district
n_districts = data[column_name].nunique()
i_district = 0
for district, group_data in data.groupby(column_name):
self.logger.info(" Building model for {} {} with total {} records ({} out of {}):"\
.format(column_name, district, group_data.shape[0], i_district, n_districts))
start_district = time.time()
group_data = group_data.dropna(axis='columns', how = 'all')
if not "{}".format(self.meta["var"]) in group_data.columns:
self.logger.info(" There is no {} measurement for district : {}. Skipping...".format(self.meta["var"], district))
continue
if 'Feature_Engineer' not in self.sources:
self.sources.append('Feature_Engineer')
predictives = [col for col in group_data.columns if col in self.vars(self.sources) and col != self.meta["var"]]
vars_impute_interp = []
vars_impute_knn = []
for var in self.vars(self.sources, group_data.columns):
v = next(v for source in self.sources for v in self.vars([source], group_data, True) if v['var'] == var)
if algorithm in self.MODELLING_CONFIG["IMPUTE_ALGORITHMS"]:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
else:
vars_impute_knn.append(var)
else:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
elif v.get("impute", '') == 'knn':
vars_impute_knn.append(var)
else:
pass # no imputation
if vars_impute_interp != []:
try:
self.logger.info(" interpolation for {} ...".format(', '.join(vars_impute_interp)))
group_data.loc[:, vars_impute_interp] = group_data.loc[:, vars_impute_interp].interpolate(limit_direction='both')
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
if vars_impute_knn != []:
try:
self.logger.info(" KNN imputation for {} ...".format(', '.join(vars_impute_knn)))
group_data.loc[:, vars_impute_knn] = self.knn_impute(group_data.loc[:, vars_impute_knn])
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
group_data = group_data[group_data[self.meta["var"]].notnull()]
self.logger.info(" Remove observations with null value for response; new # of observations: {} (previously {})".format(group_data.shape[0], self.data.shape[0]))
k = max(len(predictives) + 1, 5)
kk = group_data.shape[0]*(Config.MODELLING_CONFIG["SPLIT_RATIO"])
if kk < k:
self.logger.info(" Skipping model for {} {}; too few points: {}; minimum {} points required." \
.format(column_name, district, group_data.shape[0], int(k / (1-Config.MODELLING_CONFIG["SPLIT_RATIO"]))+1))
continue
# # create model object, set response and independent variables (predictives)
if not district in self.models:
self.models[district] = []
model = Model(self.REGRESSION_ALGORITHMS[algorithm], district, self.meta["var"], predictives)
model.set_props(algorithm, group_data)
if self.REGRESSION_ALGORITHMS[algorithm]['scaled']:
model.regression_scalar(group_data)
else:
model.regression_tree(group_data, self.meta['metric_eval'], self.meta['cv_type'])
self.models[district].append(model)
self.logger.info(" Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics.items()])))
if hasattr(model, 'metrics_holdout'):
self.logger.info(" Holdout Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics_holdout.items()])))
self.logger.info(" {} {} trained using '{:d}' records in {:0.1f}s".format(district, column_name, group_data.shape[0], time.time()-start_district))
i_district += 1
#if i_district > 2: break
self.logger.info(" {} {}(s) trained using {} algorithm in {:0.2f}s".format(i_district, column_name, algorithm, time.time()-start))
def forecasting(self, data, algorithms, column_name="District", univariate=Config.MODELLING_CONFIG["UNIVARIATE_OPTION"], seasonal=Config.MODELLING_CONFIG["SEASONAL_OPTION"]):
"""Run the regression / forecasting / heuristics model
Run the regression model on each clustering_type defined with different models.
Parameters
----------
data : str
Merged dataframe
algorithms : str
Types of models
column_name : str, optional (default='District')
Unique column to used to subset the dataframe
"""
self.meta["univariate"] = self.MODELLING_CONFIG["UNIVARIATE_OPTION"]
self.meta["seasonal"] = self.MODELLING_CONFIG["SEASONAL_OPTION"]
self.logger.info("Training using forecasting algorithms with evaluation type on '{}':".format(self.meta['metric_eval']))
# # loop over algorithms in forecasting algorithms
for algorithm in algorithms:
start = time.time()
self.logger.info(" Training using forecasting algorithm {} ...".format(algorithm))
# # loop over district
n_districts = data[column_name].nunique()
i_district = 0
for district, group_data in data.groupby(column_name):
self.logger.info(" Building model for {} {} with total {} records ({} out of {}):"\
.format(column_name, district, group_data.shape[0], i_district, n_districts))
start_district = time.time()
group_data = group_data.dropna(axis='columns', how = 'all')
if not "{}".format(self.meta["var"]) in group_data.columns:
                    self.logger.info("    There is no {} measurement for district : {}. Skipping...".format(self.meta["var"], district))
continue
if self.meta["univariate"] == True:
predictives = [col for col in group_data.columns if col in self.meta["var"]]
elif self.meta["univariate"] == False:
predictives = [col for col in group_data.columns if col in self.vars(self.sources) and col != self.meta["var"]]
vars_impute_interp = []
vars_impute_knn = []
for var in self.vars(self.sources, group_data.columns):
v = next(v for source in self.sources for v in self.vars([source], group_data, True) if v['var'] == var)
if algorithm in self.MODELLING_CONFIG["IMPUTE_ALGORITHMS"]:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
else:
vars_impute_knn.append(var)
else:
if v.get("impute", '') == 'interp':
vars_impute_interp.append(var)
elif v.get("impute", '') == 'knn':
vars_impute_knn.append(var)
else:
pass # no imputation
if vars_impute_interp != []:
try:
self.logger.info(" interpolation for {} ...".format(', '.join(vars_impute_interp)))
group_data.loc[:, vars_impute_interp] = group_data.loc[:, vars_impute_interp].interpolate(limit_direction='both')
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
if vars_impute_knn != []:
try:
self.logger.info(" KNN imputation for {} ...".format(', '.join(vars_impute_knn)))
group_data.loc[:, vars_impute_knn] = self.knn_impute(group_data.loc[:, vars_impute_knn])
except ValueError:
self.logger.info(" Not enough data point in {} for KNN imputation and interpolation ...".format(', '.join(vars_impute_knn)))
group_data = group_data[group_data[self.meta["var"]].notnull()]
self.logger.info(" Remove observations with null value for response; new # of observations: {} (previously {})".format(group_data.shape[0], self.data.shape[0]))
k = max(len(predictives) + 1, 5)
kk = group_data.shape[0]*(Config.MODELLING_CONFIG["SPLIT_RATIO"])
if kk < k:
self.logger.info(" Skipping model for {} {}; too few points: {}; minimum {} points required." \
.format(column_name, district, group_data.shape[0], int(k / (1-Config.MODELLING_CONFIG["SPLIT_RATIO"]))+1))
continue
# # create model object, set response and independent variables (predictives)
if not district in self.models:
self.models[district] = []
model = Model(self.FORECAST_ALGORITHMS[algorithm], district, self.meta["var"], predictives)
model.set_props(algorithm, group_data)
model.forecast_model(group_data, self.meta["seasonal"])
self.models[district].append(model)
self.logger.info(" Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics.items()])))
if hasattr(model, 'metrics_holdout'):
self.logger.info(" Holdout Metrics:: {}".format(', '.join(["{}:{:.2f}".format(m, v) for m, v in model.metrics_holdout.items()])))
self.logger.info(" {} {} trained using '{:d}' records in {:0.1f}s".format(district, column_name, group_data.shape[0], time.time()-start_district))
i_district += 1
#if i_district > 2: break
self.logger.info(" {} {}(s) trained using {} algorithm in {:0.2f}s".format(i_district, column_name, algorithm, time.time()-start))
def sort_models(self):
"""Sort the models base on the selected metric
The results from model will be sorted from the metric score;
The primary metric score defined is R2 score;
You can select the threshold of metric to be displayed in chart.
"""
self.meta["METRIC_BEST"] = self.MODELLING_CONFIG["METRIC_BEST"]
self.meta["METRIC_BEST_THRESH"] = self.MODELLING_CONFIG.get("METRIC_BEST_THRESH", None)
self.logger.info(" Sorting models per district base on metric '{}'".format(self.meta["METRIC_BEST"]))
reverse = False if self.meta["METRIC_BEST"] in ["MAE", "MAPE", "RMSE", "MSE"] else True
self.best_district = []
for district in self.models:
self.models[district].sort(key=lambda x: x.metrics[self.meta["METRIC_BEST"]], reverse=reverse)
if self.meta["METRIC_BEST_THRESH"] != None:
metric_value = self.models[district][0].metrics[self.meta["METRIC_BEST"]]
if (not reverse and metric_value < self.meta["METRIC_BEST_THRESH"] ) or \
(reverse and metric_value > self.meta["METRIC_BEST_THRESH"] ):
self.best_district.append(district)
min_x = min(self.models[district][0].actual.min(), self.models[district][0].pred.min())
if min_x < self.axis_limit[0]:
self.axis_limit[0] = min_x
max_x = max(self.models[district][0].actual.max(), self.models[district][0].pred.max())
if max_x > self.axis_limit[1]:
self.axis_limit[1] = max_x
def save_models(self, fname=""):
"""Saving the trained models to pickle files
Model will be saved as a pickle file with extension on .sa
Parameters
----------
fname : str (default='')
Read in model file with extension .sa
Returns
-------
Models file in .sa extension format
"""
self.meta["n_models"] = len(self.models)
training = dict(
models = {w: [self.models[w][0]] for w in self.models},
meta = self.meta
)
if fname == "":
fname = os.path.join(self.FILES["DATA"], self.FILES["MODELS"], self.meta["var"] + '_' + '.' + Config.NAME["short"].lower())
if os.path.exists(fname):
os.remove(fname)
with open(fname, 'wb') as handle:
pickle.dump(training, handle, protocol=pickle.HIGHEST_PROTOCOL)
self.logger.info("Training and its models saved to file '{}'.".format(fname))
def load_models(self, path, append=False):
"""Loading the trained models from pickle files
Model with extension on .sa will be loaded as input in dashboard
Parameters
----------
path : str
Input directory where model files are stored
----------
"""
file_path = os.path.join(path, self.meta["var"] + '.' + Config.NAME["short"].lower()) if not os.path.isfile(path) else path
with open(file_path, 'rb') as handle:
training = pickle.load(handle)
if not append:
self.models = training["models"]
self.meta = training["meta"]
self.meta["n_models"] = len(self.models)
else:
if training["meta"]["var"] != self.meta["var"]:
self.logger.critical(" existing training is for response '{}', \
while the loading train is for response '{}'.".format(self.meta["var"], training["meta"]["var"]))
self.models.update(training["models"])
self.meta['runtime'] += training["meta"]['runtime']
self.meta["n_models"] += len(training["models"])
def predict(self, df_test, metric=Config.MODELLING_CONFIG["PREDICT_METRIC_CONF"]):
"""Predict sales
Use model (.sa) file created from different model types, then use the model file to do prediction of sales.
Parameters
----------
df_test : object (default=district_test_{source}.csv & reservoir_{source}.csv)
Merge dataframe from input data source
Returns
-------
df_result : object
Dataframe on predicted sales for each cluster column with its best metrics
"""
cluster_col = 'District'
df_result = pd.DataFrame({"{}".format(cluster_col): [], "DATE": [], self.meta["var"]: [], "METRIC":[]})
for district_name, district_data in df_test.groupby("{}".format(cluster_col)):
if district_name in self.models:
model_accu = [self.models[district_name][0].metrics[self.MODELLING_CONFIG["METRIC_BEST"]]]*district_data.shape[0]
preds = pd.DataFrame({
"{}".format(cluster_col): pd.Series([district_name]*district_data.shape[0]),
"DATE": pd.Series(district_data.index),
self.meta["var"]: pd.Series(self.models[district_name][0].predict(district_data)),
"METRIC": pd.Series(model_accu),
})
preds[self.meta["var"]] = preds[self.meta["var"]].round(1)
df_result = pd.concat([df_result, preds])
if metric in ['False', False]:
df_result.drop(columns=["METRIC"], inplace=True)
return df_result
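    # Usage sketch, assuming ``df_test`` is a hypothetical dataframe indexed by date with a
    # "District" column matching the districts the models were trained on:
    #
    #   preds = train.predict(df_test)                 # includes the best-model METRIC column
    #   preds = train.predict(df_test, metric=False)   # drops the METRIC column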
def evaluate(self, actual_all, pred_all):
"""Evaluate the prediction result between actual and predicted value
Acquiring the sales value from test data (actual value).
        Then, with the predicted sales value, we evaluate the prediction error.
Parameters
----------
actual_all : object
Dataframe of test data with sales
pred_all : object
Dataframe of predicted sales
"""
results = []
for district, actual in actual_all.groupby("District"):
pred = pred_all[pred_all["District"]==district][self.meta["var"]]
inds = actual[self.meta["var"]].notnull()
metrics = self.models[district][0].evaluate(actual[self.meta["var"]][inds], pred[inds])
results.append((district, metrics[Config.MODELLING_CONFIG["METRIC_BEST"]]))
return pd.DataFrame.from_records(results, columns=["District", "Metric_new"])
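    # Usage sketch: compare hold-out actuals against the predictions produced by predict();
    # ``df_test`` and ``preds`` follow the hypothetical frames used in the example above:
    #
    #   new_metrics = train.evaluate(actual_all=df_test, pred_all=preds)
    #   new_metrics.head()   # one row per district with the refreshed best metric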
def knn_impute(self, data, k=None):
"""KNN imputation on missing values
        KNN imputation utilizes nearby columns as input parameters to impute missing cells;
        however, KNN imputation can be very time-consuming on large dataframes.
        Parameters
        ----------
        data : object
            Dataframe whose missing values are to be imputed
        k : int (default=Config.MODELLING_CONFIG["KNN_NEIGHBOUR"])
            Number of nearest neighbours used by the KNN imputer
        Returns
        -------
        array : object
            Array of the imputed column values, in the original column order
"""
if k == None:
k = self.MODELLING_CONFIG["KNN_NEIGHBOUR"]
        # drop columns that are mostly missing before imputation
        data = data.dropna(axis=1, how="all", thresh=(data.shape[0])*self.MODELLING_CONFIG["IMPUTE_MISSING_PERCENT_THRES"])
        # label-encode any non-numeric columns one by one so the scaler and imputer
        # receive purely numeric input while keeping the column shape expected by the caller
        for col in data.select_dtypes(include="object").columns:
            data[col] = LabelEncoder().fit_transform(data[col].astype(str))
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = scaler.fit_transform(data.values)
        knn_imputer = KNNImputer(n_neighbors=k)
        scaled_data = knn_imputer.fit_transform(scaled_data)
        scaled_data = scaler.inverse_transform(scaled_data)
        return scaled_data
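    # Standalone sketch of the same idea with scikit-learn primitives (assumes a scikit-learn
    # version that ships KNNImputer, i.e. >= 0.22); the values are illustrative only:
    #
    #   import numpy as np
    #   from sklearn.impute import KNNImputer
    #   X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
    #   KNNImputer(n_neighbors=2).fit_transform(X)
    #   # the missing cell is replaced by the mean of that column over the 2 nearest rows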
def get_results(self):
"""get results for metric"""
results_list = []
for district in self.models:
# # loop over the models per district
for model in self.models[district]:
# # loop over the metrics
for m, val in model.metrics.items():
result = dict(Algorithm=model.algorithm, District=district, created=model.created,
start_time=model.start_time, end_time=model.end_time, n_records=model.n_records,
metric_name=m, metric_value=val)
results_list.append(result)
self.results = pd.DataFrame(results_list)
fname = os.path.join(Config.FILES["DATA_LOCAL"], "{}{}.csv".format("test_results", self.suffix))
self.results.to_csv(fname)
# set the best algorithm for each item
self.bests = {}
best_district = None
for metric, group_data in self.results.groupby('metric_name'):
if metric == "MAPE":
best_model = group_data.set_index("Algorithm").groupby(['District'])['metric_value'].agg('idxmin').rename("best_model")
best_accuracy = group_data.groupby(['District'])['metric_value'].min().rename("best_accuracy")
else:
best_model = group_data.set_index("Algorithm").groupby(['District'])['metric_value'].agg('idxmax').rename("best_model")
best_accuracy = group_data.groupby(['District'])['metric_value'].max().rename("best_accuracy")
self.bests[metric] = pd.concat([best_model, best_accuracy], axis = 1)
if self.best_district != []:
self.bests[metric] = self.bests[metric].loc[self.best_district]
def ADF_Stationarity_Test(self, time_series, print_results=True):
"""Augmented Dickey-Fuller (ADF) test
The Dickey-Fuller test is a type of unit root test. A unit root is a cause of non-stationarity,
and the ADF test checks whether a unit root is present.
.. note::
A time series is stationary if a shift in time doesn't change its statistical properties,
in which case a unit root does not exist.
The null and alternate hypotheses of the Augmented Dickey-Fuller test are defined as follows:
* **Null Hypothesis**: There is the presence of a unit root
* **Alternate Hypothesis**: There is no unit root
Parameters
----------
time_series : array-like
An array of time series data to be tested on stationarity
print_results : boolean (default=True)
Option to decide whether to print the results of ADF testing
Returns
-------
p_value :
P-value determined from ADF test
is_stationary :
Boolean on whether the time series is stationary or not
dfResults :
Dictionary of result from ADF testing
"""
adfTest = adfuller(time_series, autolag='AIC')
self.p_value = adfTest[1]
if (self.p_value < self.MODELLING_CONFIG["SIGNIFICANCE_LEVEL"]):
self.is_stationary = True
else:
self.is_stationary = False
if print_results:
dfResults = pd.Series(adfTest[0:4], index=['ADF Test Statistic', 'P-Value', 'Lags Used', 'Observations Used'])
import sys
sys.path.append("../")
sys.path.append("AIF360/")
import warnings
from sklearn.model_selection import train_test_split
from aif360.datasets import StandardDataset
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.metrics import BinaryLabelDatasetMetric
from fairness_metrics.tot_metrics import TPR, TNR
import numpy as np
warnings.simplefilter("ignore")
np.random.seed(10)
def get_distortion_compas(vold, vnew):
"""Distortion function for the compas dataset. We set the distortion
metric here. See section 4.3 in supplementary material of
http://papers.nips.cc/paper/6988-optimized-pre-processing-for-discrimination-prevention
for an example
Note:
Users can use this as templates to create other distortion functions.
Args:
vold (dict) : {attr:value} with old values
vnew (dict) : dictionary of the form {attr:value} with new values
Returns:
d (value) : distortion value
"""
# Distortion cost
distort = {}
distort['two_year_recid'] = pd.DataFrame(
{'No recid.': [0., 2.],
'Did recid.': [2., 0.]},
index=['No recid.', 'Did recid.'])
distort['age_cat'] = pd.DataFrame(
{'Less than 25': [0., 1., 2.],
'25 to 45': [1., 0., 1.],
'Greater than 45': [2., 1., 0.]},
index=['Less than 25', '25 to 45', 'Greater than 45'])
# distort['length_of_stay'] = pd.DataFrame(
# {'<week': [0., 1., 2.],
# '<3months': [1., 0., 1.],
# '>3 months': [2., 1., 0.]},
# index=['<week', '<3months', '>3 months'])
distort['c_charge_degree'] = pd.DataFrame(
{'M': [0., 2.],
'F': [1., 0.]},
index=['M', 'F'])
distort['priors_count'] = pd.DataFrame(
{'0': [0., 1., 2., 100.],
'1 to 3': [1., 0., 1., 100.],
'More than 3': [2., 1., 0., 100.],
'missing': [0., 0., 0., 1.]},
index=['0', '1 to 3', 'More than 3', 'missing'])
# distort['score_text'] = pd.DataFrame(
# {'Low': [0., 2.,100.],
# 'MediumHigh': [2., 0.,100.],
# 'missing score': [0., 0.,1.]},
# index=['Low', 'MediumHigh','missing score'])
distort['score_text'] = pd.DataFrame(
{'Low': [0., 2.],
'MediumHigh': [2., 0.]},
index=['Low', 'MediumHigh'])
distort['sex'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
distort['race'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
total_cost = 0.0
for k in vold:
if k in vnew:
total_cost += distort[k].loc[vnew[k], vold[k]]
return total_cost
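# Hedged usage sketch (illustrative values only, not from the original script):
#   vold = {'priors_count': '0', 'sex': 0.0}
#   vnew = {'priors_count': '1 to 3', 'sex': 0.0}
#   get_distortion_compas(vold, vnew)  # -> 1.0 + 0.0 = 1.0
# i.e. moving priors_count from '0' to '1 to 3' costs 1.0 while leaving sex
# unchanged costs nothing.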
default_mappings = {
'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [{0.0: 'Male', 1.0: 'Female'},
{1.0: 'Caucasian', 0.0: 'Not Caucasian'}]
}
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
class CompasDataset_test(StandardDataset):
"""ProPublica COMPAS Dataset.
See :file:`aif360/data/raw/compas/README.md`.
"""
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
"""See :obj:`StandardDataset` for a description of the arguments.
Note: The label value 0 in this case is considered favorable (no
recidivism).
Examples:
In some cases, it may be useful to keep track of a mapping from
`float -> str` for protected attributes and/or labels. If our use
case differs from the default, we can modify the mapping stored in
`metadata`:
>>> label_map = {1.0: 'Did recid.', 0.0: 'No recid.'}
>>> protected_attribute_maps = [{1.0: 'Male', 0.0: 'Female'}]
>>> cd = CompasDataset(protected_attribute_names=['sex'],
... privileged_classes=[['Male']], metadata={'label_map': label_map,
... 'protected_attribute_maps': protected_attribute_maps})
Now this information will stay attached to the dataset and can be
used for more descriptive visualizations.
"""
np.random.seed(1)
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
def quantizeLOS(x):
if x <= 7:
return 0
if x <= 93:
return 1
else:
return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'compas-test.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) -
pd.to_datetime(df['c_jail_in'])).apply(
lambda x: x.days)
df['length_of_stay'] = df['length_of_stay'].apply(
lambda x: quantizeLOS(x))
df = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
df['c_charge_degree'] = df['c_charge_degree'].replace({'F': 0, 'M': 1})
df['c_charge_degree'] = df['c_charge_degree'].replace({0: 'F', 1: 'M'})
df1 = df[['priors_count', 'c_charge_degree', 'race',
'age_cat', 'score_text', 'two_year_recid']].copy()
tot = []
for index, row in df1.iterrows():
result = ''
for j in df1.columns:
result = result + str(row[j])
tot.append(result)
df1['tmp_feature'] = tot
df1['mis_prob'] = 0
for i in df1['tmp_feature'].unique():
if 'African' in i and i[-1] == '0' and i[0] == '0':
df1.loc[df1['tmp_feature'] == i, 'mis_prob'] = 0.8
elif 'African' in i and i[0] == '2':
df1.loc[df1['tmp_feature'] == i, 'mis_prob'] = 0.2
else:
df1.loc[df1['tmp_feature'] == i, 'mis_prob'] = 0.05
new_label = []
np.random.seed(10)
for i, j in zip(df1['mis_prob'], df1['priors_count']):
if np.random.binomial(1, i, 1)[0] == 1:
new_label.append(3)
else:
new_label.append(j)
df['priors_count'] = new_label
super(
CompasDataset_test,
self).__init__(
df=df,
label_name=label_name,
favorable_classes=favorable_classes,
protected_attribute_names=protected_attribute_names,
privileged_classes=privileged_classes,
instance_weights_name=instance_weights_name,
categorical_features=categorical_features,
features_to_keep=features_to_keep,
features_to_drop=features_to_drop,
na_values=na_values,
custom_preprocessing=custom_preprocessing,
metadata=metadata)
def load_preproc_data_compas(protected_attributes=None):
def custom_preprocessing(df):
"""The custom pre-processing function is adapted from
https://github.com/fair-preprocessing/nips2017/blob/master/compas/code/Generate_Compas_Data.ipynb
"""
df = df[['age',
'c_charge_degree',
'race',
'age_cat',
'score_text',
'sex',
'priors_count',
'days_b_screening_arrest',
'decile_score',
'is_recid',
'two_year_recid',
'length_of_stay']]
# Indices of data samples to keep
ix = df['days_b_screening_arrest'] <= 30
ix = (df['days_b_screening_arrest'] >= -30) & ix
ix = (df['is_recid'] != -1) & ix
ix = (df['c_charge_degree'] != "O") & ix
ix = (df['score_text'] != 'N/A') & ix
df = df.loc[ix, :]
# Restrict races to African-American and Caucasian
dfcut = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
# Restrict the features to use
dfcutQ = dfcut[['sex',
'race',
'age_cat',
'c_charge_degree',
'score_text',
'priors_count',
'is_recid',
'two_year_recid',
'length_of_stay']].copy()
# Quantize priors count between 0, 1-3, and >3
def quantizePrior(x):
if x == 0:
return '0'
elif x == 1:
return '1 to 3'
elif x == 2:
return 'More than 3'
else:
return 'missing'
# Quantize length of stay
def quantizeLOS(x):
if x == 0:
return '<week'
if x == 1:
return '<3months'
else:
return '>3 months'
# Quantize length of stay
def adjustAge(x):
if x == 1:
return '25 to 45'
elif x == 2:
return 'Greater than 45'
elif x == 0:
return 'Less than 25'
# Quantize score_text to MediumHigh
def quantizeScore(x):
if x == 1:
return 'MediumHigh'
else:
return 'Low'
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
dfcutQ['priors_count'] = dfcutQ['priors_count'].apply(
lambda x: quantizePrior(x))
dfcutQ['length_of_stay'] = dfcutQ['length_of_stay'].apply(
lambda x: quantizeLOS(x))
dfcutQ['score_text'] = dfcutQ['score_text'].apply(
lambda x: quantizeScore(x))
dfcutQ['age_cat'] = dfcutQ['age_cat'].apply(lambda x: adjustAge(x))
# Recode sex and race
dfcutQ['sex'] = dfcutQ['sex'].replace({'Female': 1.0, 'Male': 0.0})
dfcutQ['race'] = dfcutQ['race'].apply(lambda x: group_race(x))
features = ['two_year_recid', 'race',
'age_cat', 'priors_count', 'c_charge_degree', 'score_text']
# Pass value to df
df = dfcutQ[features]
return df
XD_features = [
'age_cat',
'c_charge_degree',
'priors_count',
'race',
'score_text']
D_features = [
'race'] if protected_attributes is None else protected_attributes
Y_features = ['two_year_recid']
X_features = list(set(XD_features) - set(D_features))
categorical_features = [
'age_cat',
'priors_count',
'c_charge_degree',
'score_text']
# privileged classes
all_privileged_classes = {"sex": [1.0],
"race": [1.0]}
# protected attribute maps
all_protected_attribute_maps = {
"sex": {
0.0: 'Male', 1.0: 'Female'}, "race": {
1.0: 'Caucasian', 0.0: 'Not Caucasian'}}
return CompasDataset_test(
label_name=Y_features[0],
favorable_classes=[0],
protected_attribute_names=D_features,
privileged_classes=[all_privileged_classes[x] for x in D_features],
instance_weights_name=None,
categorical_features=categorical_features,
features_to_keep=X_features + Y_features + D_features,
na_values=[],
metadata={'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [all_protected_attribute_maps[x]
for x in D_features]},
custom_preprocessing=custom_preprocessing)
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig_vt = load_preproc_data_compas(['race'])
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
class CompasDataset_train(StandardDataset):
"""ProPublica COMPAS Dataset.
See :file:`aif360/data/raw/compas/README.md`.
"""
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
"""See :obj:`StandardDataset` for a description of the arguments.
Note: The label value 0 in this case is considered favorable (no
recidivism).
Examples:
In some cases, it may be useful to keep track of a mapping from
`float -> str` for protected attributes and/or labels. If our use
case differs from the default, we can modify the mapping stored in
`metadata`:
>>> label_map = {1.0: 'Did recid.', 0.0: 'No recid.'}
>>> protected_attribute_maps = [{1.0: 'Male', 0.0: 'Female'}]
>>> cd = CompasDataset(protected_attribute_names=['sex'],
... privileged_classes=[['Male']], metadata={'label_map': label_map,
... 'protected_attribute_maps': protected_attribute_maps})
Now this information will stay attached to the dataset and can be
used for more descriptive visualizations.
"""
np.random.seed(1)
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
def quantizeLOS(x):
if x <= 7:
return 0
if x <= 93:
return 1
else:
return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'compas-train.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) -
pd.to_datetime(df['c_jail_in'])).apply(lambda x: x.days)
def getMetroStatus():
import http.client, urllib.request, urllib.parse, urllib.error, base64, time
headers = {
# Request headers
'api_key': '6b700f7ea9db408e9745c207da7ca827',}
params = urllib.parse.urlencode({})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/StationPrediction.svc/json/GetPrediction/All?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
conn.close()
return str(data) #returns the data as a string rather than raw bytes
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def JSONfromMetro(trainString): #converts the string into a dictionary file
import json, re
fixSlash=re.compile(r'\\') #this line and the next remove triple-slashes, which screw up the json module
fixedTrainString=fixSlash.sub('',trainString)
trainJSON=json.loads(fixedTrainString[2:-2]+"}") #slightly adjusts the string to put it in json form
if isinstance(trainJSON,dict) and 'Trains' in trainJSON.keys():
return trainJSON['Trains']
else:
return None
def saveWMATASQL(trainData, engine): #saves the current WMATA data to open engine
import datetime, pandas as pd
#the line below creates a table name starting with WMATA and then containing the date and time information, with each day/hour/minute/second taking two characters
if not isinstance(trainData, list):
return None
DTstring=str(datetime.datetime.now().month)+str(datetime.datetime.now().day).rjust(2,'0')+str(datetime.datetime.now().hour).rjust(2,'0')+str(datetime.datetime.now().minute).rjust(2,'0')+str(datetime.datetime.now().second).rjust(2,'0')
trainFrame=pd.DataFrame('-', index=range(len(trainData)), columns=['DT','Car','Loc','Lin','Des','Min','Gro']) #creates trainFrame, the DataFrame to send to the SQL server
for row in range(len(trainData)): #for all the trains in trainData
trainFrame.loc[row, 'DT'] = DTstring
for colName in ['Car','LocationCode','Line','DestinationCode','Min','Group']: #select the six relevant fields
trainFrame.loc[row, colName[:3]] = trainData[row][colName] #and fill in the relevant data
trainFrame.to_sql('WMATAFull', engine, if_exists='append') #send trainFrame to the SQL server
return trainFrame
def lineNextDF(line, destList, arrData):
import pandas as pd
timeString=arrData.DT.iloc[0]
rowName=pd.to_datetime('2016-'+timeString[0]+'-'+timeString[1:3]+' '+timeString[3:5]+':'+timeString[5:7]+':'+timeString[7:])
# names the row as a timestamp with the month day hour minute second
lineStat=pd.DataFrame('-',index=[rowName],columns=line)
for station in line: #repeat the below process for every station on the line
trains2consider=arrData.loc[lambda df: df.Loc==station].loc[lambda df: df.Des.isin(destList)] #pull out the trains at that station heading toward the destinations
if len(trains2consider.index)>0: #If you found a train
if trains2consider.Des.iloc[0] in ['A11','B08','E01','K04']: #the next few lines set the station status to the color and ETA of the first arriving train
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0].lower()+':'+trains2consider.Min.iloc[0] #if the train is terminating early (at Grovesnor, Silver Spring or Mt Vernon), use lowercase
elif trains2consider.Des.iloc[0]=='E06':
lineStat.loc[rowName,station]='Yl:'+trains2consider.Min.iloc[0]
elif trains2consider.Des.iloc[0]=='A13':
lineStat.loc[rowName,station]='Rd:'+trains2consider.Min.iloc[0]
else:
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0]+':'+trains2consider.Min.iloc[0] #otherwise use upper
return lineStat
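# Illustrative note (example values are hypothetical): each cell of lineStat
# holds "Line:ETA" for the first arriving train, e.g. 'RD:5' for a Red line
# train five minutes out; lowercase such as 'rd:5' marks an early-terminating
# train per the destination codes checked above, and '-' means no train found.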
def allLNtoNE(arrData, surgeNum): #all of the lines to the North and East for the given surge
import pandas as pd
LNlist=[]
for num in range(len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][num], NEdestList[surgeNum][num], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def allLNtoSW(arrData, surgeNum): #all of the lines to the South and West for the given surge
import pandas as pd
LNlist=[]
for num in range(1,1+len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][-num][::-1], SWdestList[surgeNum][-num][::-1], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def WMATAtableSQL(timeMin,intervalSec, surgeNum): #records for timeMin minutes, about ever intervalSec seconds
import time, pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData') #opens the engine to WmataData
#creates a list of the table we're creating to add to the index
isStart=True
startTime=time.time()
while time.time()<(startTime+60*timeMin): #runs for timeMin minutes
stepStart=time.time()
WMATAdf=saveWMATASQL(JSONfromMetro(getMetroStatus()),engine) #save the current train data and appends the name to tableList
if isinstance(WMATAdf,pd.DataFrame) and len(WMATAdf.index)>0: #if you got data back
if isStart: #and it's the first row
allLN2NE=allLNtoNE(WMATAdf,surgeNum) #set allLNtoNE equal to the all LineNext to NE data
allLN2SW=allLNtoSW(WMATAdf,surgeNum) #set allLNtoSW equal to the all LineNext to SW data
isStart=False #and the next row will not be the first row
else: #for other rows
allLN2NE=allLN2NE.append(allLNtoNE(WMATAdf,surgeNum)) #append the data
allLN2SW=allLN2SW.append(allLNtoSW(WMATAdf,surgeNum))
stepTime=time.time()-stepStart #calculates the time this step took
if stepTime<intervalSec: #if intervalSec seconds have not passed,
time.sleep(intervalSec-stepTime) #wait until a total of intervalSec have passed
engine.connect().close()
return [allLN2NE, allLN2SW]
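# Hedged usage sketch (assumes lineList, NEdestList and SWdestList are defined
# elsewhere in this project):
#   allNE, allSW = WMATAtableSQL(timeMin=30, intervalSec=20, surgeNum=4)
# polls the WMATA feed for ~30 minutes, saving each snapshot to SQL and
# returning next-train status frames for both directions.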
def lineNextSQL(line, timeString,destList, engine): #reads the next train to arrive at the stations in line heading toward destList and returns it as a Data Frame
import pandas as pd
from sqlalchemy import create_engine
isEngineNone=(engine is None)
if isEngineNone: #if there's not an engine, make one
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT"='+"'"+timeString+"';"
arrData = pd.read_sql(query, engine)
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
n, pint = np.histogram(x, num_bins) # bin counts and edges; replaces the commented-out plt.hist call so pint is actually populated
y = spq['Product_Qty']
num_bins = 5
n, dint = np.histogram(y, num_bins) # bin counts and edges for demand
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
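# Note on the fitted form (documentation only, no behaviour change): with the
# Week column stored as log(week index), the estimated demand is approximately
#   Qty ~ intercept + b*(Price - CompPrice) + c1*Promo1 + c2*Promo2 + d*log(t)
# where b, c1, c2 and d are diffpriceprodvscomp_param, promo1_param,
# promo2_param and week_param respectively.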
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
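# Formulation sketch (comments only): the solver below maximises total revenue
#   sum_t p_t * demand(p_t)          (objective() returns the negative for SLSQP)
# subject to sum_t demand(p_t) <= s_0 (inventory) and 0 <= p_t <= price_max.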
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1).reindex(dear.index) # join_axes was removed from newer pandas; reindex gives the same alignment
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + [5000000*cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
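# Formulation sketch (comments only): a capacitated facility location MILP,
#   minimise  sum_{f,c} DelCost[f,c]*x[f,c] + sum_f FixedCost[f]*y[f] + 5e6*sum_c slack[c]
#   s.t.      sum_f x[f,c] + slack[c] == Demand[c]     for every customer c
#             sum_c x[f,c] <= Capacity[f]*y[f]         for every factory f
# with x integer >= 0 and y binary; the large slack penalty flags unmet demand.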
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
		#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
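		# chronological 70/30 split: D is the fitting window, V the hold-out used by the regression branch for its error metrics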
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
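			# note: despite the name, each 'moving average' forecast below is an MA(1) model, i.e. ARIMA(0,0,1), fitted per column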
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
					ratio_inc.append(int(((bs-As)/As)*100))
				return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
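			# regression branch: fit a linear trend of each demand column against its integer time index, score it on the hold-out V, then extrapolate noofterms periods ahead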
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
				#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
				#now run the model for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
					#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
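			# simple exponential smoothing: s_t = alpha*x_t + (1-alpha)*s_(t-1); future periods are rolled forward from the last smoothed value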
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
						meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
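		# assemble a side-by-side summary of TotalDemand forecasts from every selected model, with one row per forecast date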
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
		#read the quarterly-aggregated data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
			############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
					ratio_inc.append(int(((bs-As)/As)*100))
				return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
				#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
				#now run the model for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
					#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
						meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
		#read the yearly-aggregated data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
			############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
					ratio_inc.append(int(((bs-As)/As)*100))
				return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
				#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
				#now run the model for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
					#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp= | pd.DataFrame(pred) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setup method,
: 2) then call every method starting with "test",
: 3) then the teardown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty kabam object
kabam_empty = Kabam(df_empty, df_empty)
return kabam_empty
def test_ventilation_rate(self):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param zoo_wb: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
try:
#use the zooplankton variables/values for the test
kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float')
result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_uptake_eff_gills(self):
"""
:description Pesticide uptake efficiency by gills
        :unit fraction
        :expression Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.pest_uptake_eff_bygills()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_phytoplankton_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit: L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k1_calc(self):
"""
        :description Uptake rate constant through respiratory area for aquatic animals
:unit: L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float')
try:
pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float')
vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_water_part_coef(self):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight)
:param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param zoo_water: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float')
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
beta = 0.35
result = kabam_empty.animal_water_part_coef(kabam_empty.zoo_lipid_frac,
kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac, beta)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k2_calc(self):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param zoo_k1: Uptake rate constant through respiratory area for aquatic animals
:param k_bw_zoo (Kbw): Organism-Water partition coefficient (based on organism wet weight ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5186969, 0.79045921, 0.09252798], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_k1 = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
kabam_empty.k_bw_zoo = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
result = kabam_empty.aq_animal_k2_calc(kabam_empty.zoo_k1, kabam_empty.k_bw_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_grow_rate_const(self):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param zoo_wb: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = | pd.Series([], dtype='float') | pandas.Series |
import os
import toml
import time
import datetime
import pandas as pd
from bayescache.meters import (
AverageMeter, EpochMeter, LossMeter, PatienceMeter, TimeMeter
)
class OptimizationHistory:
"""Records of the optimization"""
def __init__(self, savepath=None, experiment_name=None,
device=None, dataloader_info=None, seeds=None, rank=0):
self.time_meter = TimeMeter()
self.epoch_meter = EpochMeter()
self.loss_meter = LossMeter()
self.patience_meter = PatienceMeter()
self.top1_train = AverageMeter('Acc@1', ':6.2f')
        self.top1_valid = AverageMeter('Acc@1', ':6.2f')
self.savepath = savepath
self.experiment_name = experiment_name
self.device = device
self.dataloader_info = dataloader_info
self.seeds = seeds
self.rank = rank
self.reset()
def reset(self):
self.runtime = []
self.num_epochs = []
self.train_loss = []
self.valid_loss = []
self.stop_epoch = []
def reset_meters(self):
self.time_meter.reset()
self.epoch_meter.reset()
self.loss_meter.reset()
self.patience_meter.reset()
def record_history(self):
self.runtime.append(self.time_meter.get_timings())
self.num_epochs.append(self.epoch_meter.get_counts())
self.train_loss.append(self.loss_meter.get_train_loss())
self.valid_loss.append(self.loss_meter.get_valid_loss())
self.stop_epoch.append(self.patience_meter.get_stop_epoch())
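    # Persist run metadata (device, seeds, epoch counts, runtime, top-1 accuracies) to a per-rank TOML file.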
def save_metadata(self):
if self.savepath == None or self.experiment_name == None:
raise ValueError("You must specify a savepath and experiment name to save results.")
now = datetime.datetime.now()
metadata = {
'Title': self.experiment_name,
'Date': now.strftime("%Y-%m-%d"),
'Device': self.device,
'NumEpochs': self.num_epochs,
'StopEpoch': self.stop_epoch,
'Runtime': self.runtime,
'Acc@1TrainAvg': self.top1_train.avg.item(),
'Acc@1ValidAvg': self.top1_valid.avg.item(),
'Dataloaders': self.dataloader_info,
'Seeds': self.seeds
}
meta_dir = os.path.join(self.savepath, 'metadata')
os.makedirs(meta_dir, exist_ok=True)
metafile = os.path.join(meta_dir, f'metadata{self.rank}.toml')
with open(metafile, 'w') as outfile:
toml.dump(metadata, outfile)
def save(self):
self.save_metadata()
# Create separate directories to keep train/valid loss for all ranks
savepath_train = os.path.join(self.savepath, 'trainloss')
savepath_valid = os.path.join(self.savepath, 'validloss')
os.makedirs(savepath_train, exist_ok=True)
os.makedirs(savepath_valid, exist_ok=True)
trainsave = os.path.join(savepath_train, f'trainloss{self.rank}.csv')
validsave = os.path.join(savepath_valid, f'validloss{self.rank}.csv')
trainloss_df = | pd.DataFrame({'loss': self.train_loss[0]}) | pandas.DataFrame |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
        with pytest.raises(TypeError, match=self.mutable_regex):
            args[0](*args[1:], **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that the basic ndarray-compat properties are exposed and non-None.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset the name so it does not affect the later checks
values.name = None
# create repeated values; the n-th element is repeated n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check that the values have the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from scipy.optimize import minimize
from utils import get_next_gw, time_decay
from ranked_probability_score import ranked_probability_score, match_outcome
class Bradley_Terry:
""" Model game outcomes using logistic distribution """
def __init__(
self,
games,
threshold=0.1,
scale=1,
parameters=None,
decay=True):
"""
Args:
games (pd.DataFrame): Finished games to use for training.
threshold (float): Draw margin used to differentiate team performances
scale (float): Scale of the logistic link applied to rating differences
parameters (array): Initial parameters to use
decay (boolean): Apply time decay
"""
self.games = games.loc[:, [
"score1", "score2", "team1", "team2", "date"]]
self.games = self.games.dropna()
self.games["date"] = pd.to_datetime(self.games["date"])
self.games["days_since"] = (
self.games["date"].max() - self.games["date"]).dt.days
self.games["weight"] = (
time_decay(0.0026, self.games["days_since"]) if decay else 1)
self.decay = decay
self.games["score1"] = self.games["score1"].astype(int)
self.games["score2"] = self.games["score2"].astype(int)
self.teams = np.sort(np.unique(self.games["team1"]))
self.league_size = len(self.teams)
self.threshold = threshold
self.scale = scale
# Initial parameters
if parameters is None:
self.parameters = np.concatenate((
np.random.uniform(0, 1, (self.league_size)), # Strength
[.1], # Home advantage
))
else:
self.parameters = parameters
def likelihood(self, parameters, games):
""" Perform sample prediction and compare with outcome
Args:
parameters (pd.DataFrame): Current estimate of the parameters
games (pd.DataFrame): Fixtures
Returns:
(float): Likelihood of the estimated parameters
"""
parameter_df = (
pd.DataFrame()
.assign(rating=parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(
games,
parameter_df,
left_on='team1',
right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
)
outcome = match_outcome(fixtures_df)
outcome_ma = np.ones((fixtures_df.shape[0], 3))
outcome_ma[np.arange(0, fixtures_df.shape[0]), outcome] = 0
odds = np.zeros((fixtures_df.shape[0], 3))
odds[:, 0] = (
1 / (1 + np.exp(
-(
fixtures_df["rating1"] + parameters[-1] -
fixtures_df["rating2"] - self.threshold
) / self.scale)
)
)
odds[:, 2] = (
1 / (1 + np.exp(
-(
fixtures_df["rating2"] - parameters[-1] -
fixtures_df["rating1"] - self.threshold
) / self.scale)
)
)
odds[:, 1] = 1 - odds[:, 0] - odds[:, 2]
return - np.power(
np.ma.masked_array(odds, outcome_ma),
np.repeat(
np.array(fixtures_df["weight"].values).reshape(-1, 1),
3,
axis=1)
).sum()
def maximum_likelihood_estimation(self):
"""
Maximum likelihood estimation of the model parameters for team
strengths and the home field advantage.
"""
# Constrain the strength ratings to sum to the league size; ratings are only
# identified up to an additive constant, so this pins down a unique, reproducible solution
constraints = [{
"type": "eq",
"fun": lambda x:
sum(x[: self.league_size]) - self.league_size
}]
# Set the maximum and minimum values the parameters can take
bounds = [(0, 3)] * self.league_size
bounds += [(0, 1)]
self.solution = minimize(
self.likelihood,
self.parameters,
args=self.games,
constraints=constraints,
bounds=bounds,
options={'disp': False, 'maxiter': 100})
self.parameters = self.solution["x"]
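# A hypothetical end-to-end sketch (the 'games' and 'upcoming' dataframes are
# assumptions; the column names match those consumed by __init__ and predict):
#   model = Bradley_Terry(games)            # games: score1, score2, team1, team2, date
#   model.maximum_likelihood_estimation()   # fit team strengths + home advantage
#   preds = model.predict(upcoming)         # upcoming: team1, team2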
def predict(self, games):
""" Predict score for several fixtures
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with appended odds
"""
parameter_df = (
pd.DataFrame()
.assign(rating=self.parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=self.parameters[-1])
)
def synthesize_odds(row):
""" Lambda function that parses row by row to compute score matrix
Args:
row (array): Fixture
Returns:
(tuple): Home win, draw, and away win probabilities
"""
home_win_p = (
1 / (
1 + np.exp(
-(
row["rating1"] + row["home_adv"] -
row["rating2"] - self.threshold) / self.scale
)
)
)
away_win_p = (
1 / (
1 + np.exp(
-(
row["rating2"] - row["home_adv"] -
row["rating1"] - self.threshold) / self.scale
)
)
)
draw_p = 1 - home_win_p - away_win_p
return home_win_p, draw_p, away_win_p
(
fixtures_df["home_win_p"],
fixtures_df["draw_p"],
fixtures_df["away_win_p"]
) = zip(*fixtures_df.apply(
lambda row: synthesize_odds(row), axis=1))
return fixtures_df
def evaluate(self, games):
""" Evaluate the model's prediction accuracy
Args:
games (pd.DataFrame): Fixtures to evaluate on
Returns:
pd.DataFrame: df with appended metrics
"""
fixtures_df = self.predict(games)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score(
[row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
def backtest(
self,
train_games,
test_season,
path='',
cold_start=False,
save=True):
""" Test the model's accuracy on past/finished games by iteratively
training and testing on parts of the data.
Args:
train_games (pd.DataFrame): All the training samples
test_season (int): Season to use a test set
path (string): Path extension to adjust to ipynb use
cold_start (boolean): Resume training with random parameters
save (boolean): Save predictions to disk
Returns:
(float): Evaluation metric
"""
# Get training data
self.train_games = train_games
# Initialize model
self.__init__(self.train_games[
self.train_games['season'] != test_season],
decay=self.decay)
# Initial train on past seasons
self.maximum_likelihood_estimation()
# Get test data
# Separate testing based on per GW intervals
fixtures = (
pd.read_csv(
f"{path}data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = (
pd.to_datetime(fixtures["kickoff_time"]).dt.date)
# Get only EPL games from the test season
self.test_games = (
self.train_games
.loc[self.train_games['league_id'] == 2411]
.loc[self.train_games['season'] == test_season]
.dropna()
)
self.test_games["kickoff_time"] = (
pd.to_datetime(self.test_games["date"]).dt.date)
# Merge on date
self.test_games = pd.merge(
self.test_games,
fixtures,
left_on='kickoff_time',
right_on='kickoff_time')
# Add the home team and away team index for running inference
idx = pd.DataFrame()
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyfora.pandas_util
import pyfora.algorithms
import pyfora.algorithms.LinearRegression as LinearRegression
import pyfora.pure_modules.pure_pandas as PurePandas
import numpy
import pandas
import pandas.util.testing
import random
class InMemoryPandasTestCases(object):
def checkFramesEqual(self, df1, df2):
pandas.util.testing.assert_frame_equal(df1, df2)
return True
def checkSeriesEqual(self, series1, series2):
pandas.util.testing.assert_series_equal(series1, series2)
return True
def test_pandas_series_basic(self):
s = pandas.Series(range(10))
def f():
return s
self.equivalentEvaluationTest(f)
def test_repeated_dataframe_ctor(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return pandas.DataFrame(df)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_repeated_series_ctor(self):
s = pandas.Series([1,2,3])
def f():
return pandas.Series(s)
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkSeriesEqual
)
def test_pandas_dataframes_basic(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f():
return df
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_series_indexing_1(self):
s = pandas.Series(4)
def f(ix):
return s.iloc[ix]
for ix in range(-len(s), len(s)):
self.equivalentEvaluationTest(
f,
ix,
comparisonFunction=lambda x, y: x == y
)
def test_pandas_dataframe_indexing_1(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
def f(ix, jx):
return df.iloc[ix, jx]
for ix in range(-df.shape[0], df.shape[0]):
for jx in range(-df.shape[1], df.shape[1]):
self.equivalentEvaluationTest(
f, ix, jx,
comparisonFunction=lambda x, y: int(x) == int(y)
)
def test_pandas_dataframe_indexing_2(self):
df = pandas.DataFrame({'A': [1,2], 'B': [5,6]})
def f(ix1, ix2, jx):
return df.iloc[ix1:ix2, jx]
ixes = range(-df.shape[0], df.shape[1]) + [None]
jxes = range(-df.shape[1], df.shape[1])
for ix1 in ixes:
for ix2 in ixes:
for jx in jxes:
self.equivalentEvaluationTest(
f, ix1, ix2, jx,
comparisonFunction=lambda x, y: list(x) == list(y)
)
def test_pandas_dataframe_indexing_3(self):
# due to some hashing stuff, this test will fail if
# key 'D' is replaced by 'C'. Ehh ...
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,:-1]
def g():
return df.iloc[:,-1:]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
self.equivalentEvaluationTest(
g,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_4(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,:]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_5(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f():
return df.iloc[:,]
self.equivalentEvaluationTest(
f,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_indexing_6(self):
df = pandas.DataFrame({'A': range(5), 'B': range(5,10), 'D': range(10,15)})
def f(jx):
return df.iloc[:jx]
for jx in xrange(2, 5, 2):
self.equivalentEvaluationTest(
f, jx,
comparisonFunction=self.checkFramesEqual
)
def test_pandas_shape(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
self.equivalentEvaluationTest(lambda: df.shape)
def test_pandas_dataframe_ctor_1(self):
items = [('A', [1,2,3]), ('B', [4,5,6])]
self.equivalentEvaluationTest(
lambda: pandas.DataFrame(dict(items)),
comparisonFunction=self.checkFramesEqual
)
def test_pandas_dataframe_ctor_2(self):
# NOTE: this form breaks the pandas API
col1 = [1,2,3]
col2 = [4,5,6]
data = [col1, col2]
res = self.evaluateWithExecutor(
lambda: pandas.DataFrame(data)
)
self.checkFramesEqual(
res,
pandas.DataFrame({
'C0': col1,
'C1': col2
})
)
def test_pandas_dataframe_class(self):
self.equivalentEvaluationTest(
lambda: pandas.DataFrame,
comparisonFunction=lambda x, y: x == y
)
def test_pandas_read_csv_1(self):
# there's some weirdness with whitespace that we have to deal
# with, on the fora side. For example, after indenting all the
# lines of s here, the read csv will miss the first line
# o_O
s = """
A,B,C
1,2,3
4,5,6
7,8,9
10,11,12
"""
res = self.evaluateWithExecutor(
lambda: pyfora.pandas_util.read_csv_from_string(s)
)
self.checkFramesEqual(
res,
pandas.DataFrame(
{
'A': [1,4,7,10],
'B': [2,5,8,11],
'C': [3,6,9,12]
},
dtype=float
)
)
def test_pandas_read_csv_2(self):
# there's some weirdness with whitespace that we have to deal
# with, on the fora side. For example, after indenting all the
# lines of s here, the read csv will miss the first line
# o_O
s = """
A,B,C
1,2,3
4,notAFloat,6
7,8,9
10,11,12
"""
def f():
try:
return pyfora.pandas_util.read_csv_from_string(s)
except Exception as e:
return e
res = self.evaluateWithExecutor(f)
self.assertIsInstance(res, Exception)
def test_pandas_read_csv_from_s3(self):
s = """
A,B,C
1,2,3
4,5,6
7,8,9
10,11,12
"""
with self.create_executor() as executor:
s3 = self.getS3Interface(executor)
key = "test_pandas_read_csv_from_s3_key"
s3().setKeyValue("bucketname", key, s)
remoteCsv = executor.importS3Dataset("bucketname", key).result()
with executor.remotely.downloadAll():
df = pyfora.pandas_util.read_csv_from_string(remoteCsv)
self.checkFramesEqual(
df,
pandas.DataFrame(
{
'A': [1,4,7,10],
'B': [2,5,8,11],
'C': [3,6,9,12]
},
dtype=float
)
)
def pyfora_linear_regression_test(self):
random.seed(42)
nRows = 100
x_col_1 = []
x_col_2 = []
y_col = []
for _ in range(nRows):
x1 = random.uniform(-10, 10)
x2 = random.uniform(-10, 10)
noise = random.uniform(-1, 1)
y = x1 * 5 + x2 * 2 - 8 + noise
x_col_1.append(x1)
x_col_2.append(x2)
y_col.append(y)
def computeCoefficients():
predictors = PurePandas.PurePythonDataFrame([x_col_1, x_col_2], ["x1", "x2"])
responses = PurePandas.PurePythonDataFrame([y_col], ["y"])
return LinearRegression.linearRegression(predictors, responses)
res_python = computeCoefficients()
res_pyfora = self.evaluateWithExecutor(computeCoefficients)
self.assertArraysAreAlmostEqual(res_python, res_pyfora)
df_x = pandas.DataFrame({
'x1': x_col_1,
'x2': x_col_2
})
df_y = pandas.DataFrame({
'y': y_col
})
res_pandas = LinearRegression.linearRegression(df_x, df_y)
self.assertArraysAreAlmostEqual(res_python, res_pandas)
# verified using sklearn.linear_model.LinearRegression, on nRows = 100
res_scikit = numpy.array([[4.96925412, 2.00279298, -7.98208391]])
self.assertArraysAreAlmostEqual(res_python, res_scikit)
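# The reference values above were, per the comment, produced with scikit-learn; a
# sketch of that check (sklearn is not a dependency of this test, hence a comment):
#   from sklearn.linear_model import LinearRegression as SkLR
#   reg = SkLR().fit(numpy.column_stack([x_col_1, x_col_2]), y_col)
#   reg.coef_, reg.intercept_   # roughly [4.969, 2.003] and -7.982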
def test_pyfora_linear_regression_1(self):
self.pyfora_linear_regression_test()
def test_pyfora_linear_regression_with_splitting(self):
# note: the right way to do this is to expose _splitLimit
# as an argument to LinearRegression.linearRegression, but a
# lack of named arguments in pyfora means that the code
# would be slightly more verbose than it should need be.
oldSplitLimit = LinearRegression._splitLimit
try:
LinearRegression._splitLimit = 10
self.pyfora_linear_regression_test()
finally:
LinearRegression._splitLimit = oldSplitLimit
def test_series_sort_values(self):
s = pandas.Series([5,5,2,2,1,2,3,4,2,3,1,5])
def f():
return list(s.sort_values().values)
self.equivalentEvaluationTest(
f,
comparisonFunction=lambda x, y: all(map(lambda v:v[0]==v[1], zip(x, y)))
)
def test_series_unique(self):
s = pandas.Series([5,5,2,2,1,2,3,4,2,3,1,5])
def f():
return sorted(list(s.unique()))
self.equivalentEvaluationTest(
f,
comparisonFunction=lambda x, y: all(map(lambda v:v[0]==v[1], zip(x, y)))
)
def test_dataframe_pyfora_addColumn(self):
d = {'A': [1,2,3,4], 'B': [5,6,7,8]}
df = pandas.DataFrame(d)
c = range(8, 12)
def f():
return df.pyfora_addColumn('C', c)
newDict = d.copy()
newDict['C'] = c
self.checkFramesEqual(
self.evaluateWithExecutor(f),
pandas.DataFrame(newDict)
)
def test_series_isinstance(self):
s = pandas.Series([1,2,3,4])
def f():
return isinstance(s, list)
self.equivalentEvaluationTest(f)
def test_dataframe_as_matrix(self):
df = pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]})
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs_mixed_dtypes(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame(
[[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]
)
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame(
[[1, 3, 5]] + [[10, 11, 12]] * (n + 1),
columns=["a", "b", "c"],
dtype="object",
)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH#13299
def inc(x):
return x + 1
df = DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemBooleanMask:
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
ids=["dataframe", "array"],
)
def test_setitem_boolean_mask(self, mask_type, float_frame):
# Test for issue #18582
df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.loc])
def test_setitem_boolean_mask_aligning(self, indexer):
# GH#39931
df = DataFrame({"a": [1, 4, 2, 3], "b": [5, 6, 7, 8]})
expected = df.copy()
mask = df["a"] >= 3
indexer(df)[mask] = indexer(df)[mask].sort_values("a")
tm.assert_frame_equal(df, expected)
def test_setitem_mask_categorical(self):
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
catsf = Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"]
)
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
return_value = exp_fancy["cats"].cat.set_categories(
["a", "b", "c"], inplace=True
)
assert return_value is None
mask = df["cats"] == "c"
df[mask] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
@pytest.mark.parametrize("dtype", ["float", "int64"])
@pytest.mark.parametrize("kwargs", [{}, {"index": [1]}, {"columns": ["A"]}])
def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
# see GH#10126
kwargs["dtype"] = dtype
df = DataFrame(**kwargs)
df2 = df.copy()
df[df > df2] = 47
tm.assert_frame_equal(df, df2)
def test_setitem_boolean_indexing(self):
idx = list(range(3))
cols = ["A", "B", "C"]
df1 = DataFrame(
index=idx,
columns=cols,
data=np.array(
[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5], [3.0, 3.5, 4.0]], dtype=float
),
)
df2 = DataFrame(index=idx, columns=cols, data=np.ones((len(idx), len(cols))))
expected = DataFrame(
index=idx,
columns=cols,
data=np.array([[0.0, 0.5, 1.0], [1.5, 2.0, -1], [-1, -1, -1]], dtype=float),
)
df1[df1 > 2.0 * df2] = -1
tm.assert_frame_equal(df1, expected)
with pytest.raises(ValueError, match="Item wrong length"):
df1[df1.index[:-1] > 2] = -1
def test_loc_setitem_all_false_boolean_two_blocks(self):
# GH#40885
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": "a"})
expected = df.copy()
indexer = Series([False, False], name="c")
df.loc[indexer, ["b"]] = DataFrame({"b": [5, 6]}, index=[0, 1])
tm.assert_frame_equal(df, expected)
class TestDataFrameSetitemCopyViewSemantics:
def test_setitem_always_copy(self, float_frame):
assert "E" not in float_frame.columns
s = float_frame["A"].copy()
float_frame["E"] = s
float_frame["E"][5:10] = np.nan
assert notna(s[5:10]).all()
def test_setitem_clear_caches(self):
# see GH#304
df = DataFrame(
{"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3]
)
df.insert(2, "z", np.nan)
# cache it
foo = df["z"]
df.loc[df.index[2:], "z"] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z")
assert df["z"] is not foo
tm.assert_series_equal(df["z"], expected)
def test_setitem_duplicate_columns_not_inplace(self):
# GH#39510
cols = ["A", "B"] * 2
df = DataFrame(0.0, index=[0], columns=cols)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 22:53:56 2021
@author: afo
"""
import pandas as pd
import lyricsgenius
import json
import os
from os.path import isfile, join
from os import listdir
import re
from rym import rymscraper
# Function to get all the json file names in 3 subdirectories of given rapper - albums, eps, mixtapes
def load_album_names(p, df, to_collect=2):
files_alb = []
files_ep = []
files_mix = []
album_path = p + '/data/' + df.iloc[0,0] +'/albums'
ep_path = p + '/data/' + df.iloc[0,0] +'/eps'
mixtape_path = p + '/data/' + df.iloc[0,0] +'/mixtapes'
if to_collect >= 0:
# Get list of files from albums
files_alb = [f for f in listdir(album_path) if isfile(join(album_path, f))]
files_alb = [s.replace('.json', '') for s in files_alb]
if to_collect >= 1:
# Get list of files from eps
files_ep = [f for f in listdir(ep_path) if isfile(join(ep_path, f))]
files_ep = [s.replace('.json', '') for s in files_ep]
if to_collect >= 2:
# Get list of files from mixtapes
files_mix = [f for f in listdir(mixtape_path) if isfile(join(mixtape_path, f))]
files_mix = [s.replace('.json', '') for s in files_mix]
files = files_alb + files_ep + files_mix # make single list
try:
files.remove('.DS_Store')
except ValueError:
pass  # nothing to remove if .DS_Store is absent
return files
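# Hypothetical usage sketch (the artist dataframe below is an assumption; the
# data/<artist>/{albums,eps,mixtapes} layout is the one used in the paths above):
# df = pd.DataFrame({'artist': ['Nas']})
# files = load_album_names(os.getcwd(), df, to_collect=2)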
# Function to get the discography from RYM and add to dataframe
def get_album_list(urls, names, name, to_collect=2):
network = rymscraper.RymNetwork() # main scraper object
# If artist url is supplied, take it, otherwise take name
if len(str(urls[name])) > 2:
discography_infos = network.get_discography_infos(url=urls[name], complementary_infos=False)
else:
discography_infos = network.get_discography_infos(name=names[name], complementary_infos=False)
# don't forget to close and quit the browser (prevent memory leaks)
network.browser.close()
network.browser.quit()
print("Discography for: " + names[name] + " gathered successfully!")
# Placeholders for output lists
artist_list = []
album_list = []
date_list = []
type_list = []
print(discography_infos[0]['Artist'])
# If 0 - collect albums, if 1 - albums+eps, if 2 - albums+eps+mixtapes (all)
if to_collect >= 0:
# Loop to get the albums first
for x in discography_infos:
# Both of these rappers have an accent on the "e", which breaks the check by name (the csv file stores names in UTF-8)
# so for them the name is not checked; otherwise it is checked to make sure the wrong artist is not picked up
# The same logic applies to all three loops
if names[name] == 'Andre 3000' or names[name] == 'Red Cafe':
if x['Category'] == 'Album':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
else:
if x['Artist'] == names[name] and x['Category'] == 'Album':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
if to_collect >= 1:
# Loop to get the EPs second
for x in discography_infos:
if names[name] == 'Andre 3000' or names[name] == 'Red Cafe':
if x['Category'] == 'EP':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
else:
if x['Artist'] == names[name] and x['Category'] == 'EP':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
if to_collect >= 2:
# Loop to get the mixtapes last
for x in discography_infos:
if names[name] == 'Andre 3000' or names[name] == 'Red Cafe':
if x['Category'] == 'Mixtape':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
else:
if x['Artist'] == names[name] and x['Category'] == 'Mixtape':
artist_list.append(names[name])
album_list.append(x['Name'])
date_list.append(x['Year'])
type_list.append(x['Category'])
# All the ouputs collected into df
d = {'Artist': artist_list, 'Album':album_list, 'Year': date_list, 'Type': type_list}
df = pd.DataFrame(d)
df['Year'] = pd.to_numeric(df['Year'])
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
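# Note: the expected periods in these tests follow from how the requested date range
# overlaps DWD's file layout: ranges far in the past resolve to HISTORICAL, ranges
# reaching roughly the last year add RECENT, and ranges touching the current day add
# NOW (the exact cut-offs are an assumption here, not something these tests assert).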
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
    # Leave only one column that may contain NaN, which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": | pd.to_numeric([pd.NA, 273.65], errors="coerce") | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from collections import OrderedDict
def latex_matrix_string(mean, title,
row_labels, col_labels,
best_bold_row=True, best_bold_column=False):
"""
Latex Matrix String Generator.
Example
-------
mean = [[1, 6, 5, 7], [12, 4, 6, 13], [9, 8, 7, 10]]
print(latex_matrix_string(mean, "Testing Testing", [
"row1", "row2", "row3"], [
"col1", "col2", "col3", "col4"]))
Parameters
----------
mean : array of float array
An array of float arrays containing mean values
title : string
Title string of the table
row_labels : string array
Array of strings for row names
col_labels : string arrays
Array of strings for column names
best_bold_row : boolean
If set to true, the minimum mean entry in each row will
be set to bold.
    best_bold_column : boolean
If set to true, the minimum mean entry in each column will
be set to bold.
"""
matrix_string = '''\hline
'''
for i, row in enumerate(mean):
column_string = '''{ |c'''
matrix_string = matrix_string + \
"\\textbf{" + row_labels[i] + "}& " # length of row labels and number of rows must be equal
for j, cell in enumerate(row):
column_string = column_string + '''|c'''
ending_string = ''' & ''' if j < len(row) - 1 else ''' \\\ \hline'''
if best_bold_row and cell == min(
row) and best_bold_column == False:
matrix_string = matrix_string + \
"$\mathbf{" + str(cell) + "}$" + ending_string
elif best_bold_column and cell == min([a[j] for a in mean]) and best_bold_row == False:
matrix_string = matrix_string + \
"$\mathbf{" + str(cell) + "}$" + ending_string
else:
matrix_string = matrix_string + "$" + \
str(cell) + "$" + ending_string
column_string = column_string + '''| }'''
column_label = ""
for column in col_labels:
column_label = column_label + "&\\textbf{" + column + "}"
latex_string1 = '''\\begin{table}[ht]
\centering
\\begin{tabular}
''' + column_string + '''
\hline
''' + column_label + "\\\ [0.1ex]" + '''
''' + matrix_string + '''\end{tabular}
\\\[-1.5ex]
\caption{''' + title + '''}
\end{table}'''
return latex_string1
def latex_matrix_string_mean_error(mean, error, title,
row_labels, col_labels,
best_bold_row=True, best_bold_column=False):
"""
Latex Matrix String Generator.
Example
-------
mean = [[1, 6, 5, 7], [12, 4, 6, 13], [9, 8, 7, 10]]
error = [[2, 6, 1, 5], [4, 8, 2, 3], [1, 4, 8, 2]]
    print(latex_matrix_string_mean_error(mean, error, "Testing Testing", [
"row1", "row2", "row3"], [
"col1", "col2", "col3", "col4"]))
Parameters
----------
mean : array of float array
An array of float arrays containing mean values
error : array of float array
An array of float array containing error values
title : string
Title string of the table
row_labels : string array
Array of strings for row names
col_labels : string arrays
Array of strings for column names
best_bold_row : boolean
If set to true, the minimum mean entry in each row will
be set to bold.
    best_bold_column : boolean
If set to true, the minimum mean entry in each column will
be set to bold.
"""
matrix_string = '''\hline
'''
for i, row in enumerate(mean):
column_string = '''{ |c'''
matrix_string = matrix_string + \
"\\textbf{" + row_labels[i] + "}& " # length of row labels and number of rows must be equal
for j, cell in enumerate(row):
column_string = column_string + '''|c'''
ending_string = ''' & ''' if j < len(row) - 1 else ''' \\\ \hline'''
if best_bold_row and cell == min(
row) and best_bold_column == False:
matrix_string = matrix_string + \
"$\mathbf{" + str(cell) + " \pm " + str(error[i][j]) + "}$" + ending_string
elif best_bold_column and cell == min([a[j] for a in mean]) and best_bold_row == False:
matrix_string = matrix_string + \
"$\mathbf{" + str(cell) + " \pm " + str(error[i][j]) + "}$" + ending_string
else:
matrix_string = matrix_string + "$" + \
str(cell) + " \pm " + str(error[i][j]) + "$" + ending_string
column_string = column_string + '''| }'''
column_label = ""
for column in col_labels:
column_label = column_label + "&\\textbf{" + column + "}"
latex_string1 = '''\\begin{table}[ht]
\centering
\\begin{tabular}
''' + column_string + '''
\hline
''' + column_label + "\\\ [0.1ex]" + '''
''' + matrix_string + '''\end{tabular}
\\\[-1.5ex]
\caption{''' + title + '''}
\end{table}'''
return latex_string1
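# Minimal usage sketch for the table generators above; the means, errors, and
# labels are invented values used purely to illustrate the expected call shape.
def _demo_latex_matrix_string_mean_error():
    demo_mean = [[1.2, 0.8], [0.9, 1.1]]
    demo_error = [[0.1, 0.2], [0.05, 0.3]]
    # Returns LaTeX source for a 2x2 table with "mean +/- error" cells and the
    # per-row minimum set in bold (best_bold_row defaults to True).
    return latex_matrix_string_mean_error(
        demo_mean, demo_error, "Demo table",
        ["method A", "method B"], ["task 1", "task 2"])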
# def plot_over_iterations(x, methods, metric="mean", labels=None, linewidth=3, fontsize_label=25,
# x_label="Error", y_label="Number of iterations", log_y=False, log_x=False,
# title="", legend_loc=1, percentiles=(5, 95), colors=None, plot_legend=True):
# """
# Plots performance over iterations of different methods .
#
# Example:
# ----------------------------
# x = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4]])
# method_1 = np.array([[1,4,5,2], [3,4,3,6] , [2,5,5,8]])
# method_2 = np.array([[8,7,5,9], [7,3,9,1] , [3,2,9,4]])
# method_3 = np.array([[10,13,9,11], [9,12,10,10] , [11,14,18,6]])
# methods = [method_1, method_2, method_3]
# plot = plot_median(x,methods)
# plot.show()
#
# Parameters:
# ----------
# x : numpy array
# For each curve, contains the x-coordinates. Each entry
# corresponds to one method.
# methods : list of numpy arrays
# A list of numpy arrays of methods. Each method contains a numpy array
# of several run of that corresponding method.
# method_names: List of Strings
# A list of names for the methods
#
# Returns
# -------
# plt : object
# Plot Object
# """
#
# if labels is None:
# labels = ["Method-%d" % i for i in range(len(methods))]
#
# styles = ["o", "D", "s", ">", "<", "^", "v", "*", "*", ".", ",", "1", "2", "3", "4"]
#
# if colors is None:
# colors = ["blue", "green", "purple", "darkorange", "red",
# "palevioletred", "lightseagreen", "brown", "black",
# "firebrick", "cyan", "gold", "slategray"]
#
# for index, method in enumerate(methods):
# style = styles[index % len(styles)]
# color = colors[index % len(colors)]
# if metric == "median":
# plt.plot(x[index], np.median(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# elif metric == "mean":
# plt.plot(x[index], np.mean(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# elif metric == "median_percentiles":
# plt.plot(x[index], np.median(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# plt.fill_between(x[index], np.percentile(method, percentiles[0], axis=0),
# np.percentile(method, percentiles[1], axis=0),
# color=color, alpha=0.2)
# elif metric == "mean_std":
# plt.errorbar(x[index], np.mean(method, axis=0), yerr=np.std(method, axis=0),
# label=labels[index], linewidth=linewidth, marker=style, color=color)
# else:
# raise ValueError("Metric does not exist!")
#
# if plot_legend:
# plt.legend(loc=legend_loc, fancybox=True, framealpha=1, frameon=True, fontsize=fontsize_label)
#
# plt.xlabel(x_label, fontsize=fontsize_label)
# plt.ylabel(y_label, fontsize=fontsize_label)
# plt.grid(True, which='both', ls="-")
# if log_y:
# plt.yscale("log")
# if log_x:
# plt.xscale("log")
#
# plt.title(title, fontsize=fontsize_label)
# return plt
def fill_trajectory(performance_list, time_list, replace_nan=np.NaN):
frame_dict = OrderedDict()
counter = np.arange(0, len(performance_list))
for p, t, c in zip(performance_list, time_list, counter):
if len(p) != len(t):
raise ValueError("(%d) Array length mismatch: %d != %d" %
(c, len(p), len(t)))
frame_dict[str(c)] = | pd.Series(data=p, index=t) | pandas.Series |
import pytd
import os
import logging
import pandas as pd
import time
from dotenv import load_dotenv
from pathlib import Path
class TreasureData:
# Methods to integrate with TreasureData.
def __init__(self) -> None:
# Read environment variables
load_dotenv()
env_path = Path('..')/'.env'
load_dotenv(dotenv_path=env_path)
#Set up Logging
self.logger = logging.getLogger('TreasureData')
logging.basicConfig(filename="tdLogfile.txt")
self.stderrLogger=logging.StreamHandler()
self.stderrLogger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(self.stderrLogger)
self.logger.setLevel(os.getenv('logLevel'))
self.logger.debug("Class Initialised")
# Treasure Data login vars
self.td_api_server=os.getenv('td_api_server')
self.td_api_key=os.getenv('td_api_key')
self.td_engine_name=os.getenv('td_engine_name')
def connect(self, databaseName):
self.db_client=pytd.Client(database=databaseName)
def execute_sql(self, source_sql):
# Read PII from Source Table and return a pandas dataframe
# Intended for select queries that return a result.
df=""
start_time = time.time()
res = self.db_client.query(source_sql)
self.logger.info("-- Running : [" + source_sql + "]---")
df = pd.DataFrame(**res)
self.logger.info("--- Total read "+str(len(df))+" source rows in : {:.3f} seconds ---".format((time.time() - start_time)))
return df
def create_table(self, tablename, tabledef):
sql='CREATE TABLE IF NOT EXISTS '+ tablename +' ( ' + tabledef + ' )'
self.logger.debug("SQL:" + sql)
self.db_client.query(sql)
def clear_table(self,tablename):
self.db_client.query('DELETE FROM '+ tablename +' WHERE 1=1')
def write_to_table(self, tablename, tableColumns, tableData=[]):
# Writes data to a Treasure Data table.
if len(tableData) > 0:
# Create a dataframe
dest_df= | pd.DataFrame(columns=tableColumns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import time
import requests
from datetime import datetime
from logging import getLogger
from typing import Optional
from typing import Dict
from typing import Iterable
from funcy import compose
from funcy import partial
from pandas import DataFrame
from pandas import to_datetime
from pandas import Series
from pyloniex import PoloniexPublicAPI
from moneybot.clients import Postgres
from moneybot.clients import Poloniex
YEAR_IN_SECS = 60 * 60 * 24 * 365
logger = getLogger(__name__)
def format_time(ts: datetime) -> str:
return ts.strftime('%Y-%m-%d %H:%M:%S')
def historical(ticker: str) -> Dict:
url = f'https://graphs.coinmarketcap.com/currencies/{ticker}'
return requests.get(url).json()
def market_cap(hist_ticker: Dict) -> Series:
r = {}
ts = None
for key, vals in hist_ticker.items():
if ts is None:
ts = [to_datetime(t[0] * 1000000) for t in vals]
r[key] = [t[1] for t in vals]
return | DataFrame(r, index=ts) | pandas.DataFrame |
from datetime import datetime
import os
import re
import numpy as np
import pandas as pd
from fetcher.extras.common import atoi, MaRawData, zipContextManager
from fetcher.utils import Fields, extract_arcgis_attributes, extract_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.name
DATE_USED = Fields.DATE_USED.name
def add_query_constants(df, query):
for k, v in query.constants.items():
df[k] = v
return df
def build_leveled_mapping(mapping):
tab_mapping = {x.split(":")[0]: {} for x in mapping.keys() if x.find(':') > 0}
for k, v in mapping.items():
if k.find(':') < 0:
continue
tab, field = k.split(":")
tab_mapping[tab][field] = v
return tab_mapping
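# Minimal sketch of what build_leveled_mapping produces; the keys below are
# invented examples, not field names from a real state configuration.
def _demo_build_leveled_mapping():
    mapping = {"Cases:Date": "DATE", "Cases:Total": "POSITIVE", "Deaths:Date": "DATE"}
    # Returns {"Cases": {"Date": "DATE", "Total": "POSITIVE"}, "Deaths": {"Date": "DATE"}};
    # keys without a ":" separator are ignored.
    return build_leveled_mapping(mapping)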
def prep_df(values, mapping):
df = pd.DataFrame(values).rename(columns=mapping).set_index(DATE)
for c in df.columns:
if c.find('status') >= 0:
continue
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = pd.to_datetime(df.index, errors='coerce')
return df
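# Minimal sketch of prep_df: records arrive as dicts keyed by source field
# names and come back as a date-indexed, numeric frame. The keys and the
# "TOTAL" target name are invented for illustration.
def _demo_prep_df():
    values = [{"testDate": "2020-03-01", "totalTests": "10"},
              {"testDate": "2020-03-02", "totalTests": "25"}]
    mapping = {"testDate": DATE, "totalTests": "TOTAL"}
    return prep_df(values, mapping)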
def make_cumsum_df(data, timestamp_field=Fields.TIMESTAMP.name):
df = pd.DataFrame(data)
df.set_index(timestamp_field, inplace=True)
df.sort_index(inplace=True)
df = df.select_dtypes(exclude=['string', 'object'])
# .groupby(level=0).last() # can do it here, but not mandatory
cumsum_df = df.cumsum()
cumsum_df[Fields.TIMESTAMP.name] = cumsum_df.index
return cumsum_df
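# Minimal sketch of make_cumsum_df: per-day increments keyed by TIMESTAMP come
# back cumulated over time. The two records below are invented.
def _demo_make_cumsum_df():
    data = [{TS: datetime(2020, 3, 1), "POSITIVE": 5},
            {TS: datetime(2020, 3, 2), "POSITIVE": 7}]
    # The returned frame holds POSITIVE = [5, 12] with TIMESTAMP restored as a column.
    return make_cumsum_df(data)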
def handle_ak(res, mapping, queries):
tests = res[0]
collected = [x['attributes'] for x in tests['features']]
df = pd.DataFrame(collected)
df = df.pivot(columns='Test_Result', index='Date_Collected')
df.columns = df.columns.droplevel()
df['tests_total'] = df.sum(axis=1)
df = df.rename(columns=mapping).cumsum()
df[TS] = df.index
add_query_constants(df, queries[0])
tagged = df.to_dict(orient='records')
# cases
cases = pd.DataFrame([x['attributes'] for x in res[1]['features']]).rename(columns=mapping)
cases[TS] = pd.to_datetime(cases[TS], unit='ms')
cases = cases.set_index(TS).sort_index().cumsum().resample('1d').ffill()
cases[TS] = cases.index
add_query_constants(cases, queries[1])
tagged.extend(cases.to_dict(orient='records'))
# last item: already cumulative
data = extract_arcgis_attributes(res[2], mapping)
for x in data:
x[DATE_USED] = queries[2].constants[DATE_USED]
tagged.extend(data)
return tagged
def handle_ar(res, mapping):
# simply a cumsum table
data = extract_arcgis_attributes(res[0], mapping)
cumsum_df = make_cumsum_df(data)
return cumsum_df.to_dict(orient='records')
def handle_az(res, mapping, queries):
mapped = []
for i, df in enumerate(res[0]):
# minor cheating for same column names
df.columns = ["{}-{}".format(c, i) for c in df.columns]
df = df.rename(columns=mapping)
df[DATE] = pd.to_datetime(df[DATE])
df = df.set_index(DATE).sort_index().cumsum()
df[TS] = df.index
add_query_constants(df, queries[i])
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_ca(res, mapping, queries):
# need to cumsum
mapped = []
for query, result in zip(queries, res):
# extract also maps
items = extract_attributes(result, query.data_path, mapping, 'CA')
df = prep_df(items, mapping).sort_index(na_position='first').drop(columns=TS).cumsum()
df = df.loc[df.index.notna()]
add_query_constants(df, query)
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_ct(res, mapping, queries):
tests = res[0]
df = pd.DataFrame(tests).rename(columns=mapping).set_index(DATE)
for c in df.columns:
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = df.index.fillna(NULL_DATE.strftime(mapping.get('__strptime')))
df = df.sort_index().cumsum()
df[TS] = pd.to_datetime(df.index)
df[TS] = df[TS].values.astype(np.int64) // 10 ** 9
add_query_constants(df, queries[0])
tagged = df.to_dict(orient='records')
# by report
df = res[1].rename(columns=mapping).sort_values('DATE')
add_query_constants(df, queries[1])
df[TS] = df['DATE']
tagged.extend(df.to_dict(orient='records'))
# death + cases
for i, df in enumerate(res[2:]):
df = res[2+i].rename(columns=mapping).set_index('DATE').sort_index().cumsum()
add_query_constants(df, queries[2+i])
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_dc(res, mapping, queries):
df = res[0]
# make it pretty
df = df[df['Unnamed: 0'] == 'Testing'].T
df.columns = df.loc['Unnamed: 1']
df = df.iloc[2:]
df.index = pd.to_datetime(df.index, errors='coerce')
df = df.loc[df.index.dropna()].rename(columns=mapping)
add_query_constants(df, queries[0])
df[TS] = df.index
return df.to_dict(orient='records')
def handle_de(res, mapping):
df = res[0]
df['Date'] = pd.to_datetime(df[['Year', 'Month', 'Day']])
df = df[df['Statistic'].isin(mapping.keys())]
# changing the order of operations here is probably better
def prepare_values(df):
df = df.pivot(
index=['Date', 'Date used'], values='Value', columns=['Statistic'])
df[DATE_USED] = df.index.get_level_values(1)
df = df.droplevel(1)
df['Date'] = df.index
df = df.replace(mapping).rename(columns=mapping)
return df.to_dict(orient='records')
# Death
deaths_df = df[(df['Statistic'].str.find('Death') >= 0) & (df['Unit'] == 'people')]
tagged = prepare_values(deaths_df)
# testing
tests_df = df[df['Statistic'].str.find('Test') >= 0]
for x in ['people', 'tests']:
partial = prepare_values(tests_df[tests_df['Unit'] == x])
tagged.extend(partial)
# cases
cases = df[df['Unit'] == 'people'][df['Statistic'].str.find('Cases') >= 0]
partial = prepare_values(cases)
tagged.extend(partial)
return tagged
def handle_fl(res, mapping, queries):
# simply a cumsum table
tagged = []
for i, data in enumerate(res[:-1]):
df = extract_arcgis_attributes(res[i], mapping)
cumsum_df = make_cumsum_df(df)
add_query_constants(cumsum_df, queries[i])
tagged.extend(cumsum_df.to_dict(orient='records'))
# The last item is the aggregated case-line data
df = pd.DataFrame([x['attributes'] for x in res[-1]['features']])
df = df.rename(
columns={**{'EXPR_1': 'Year', 'EXPR_2': 'Month', 'EXPR_3': 'Day'}, **mapping})
df[DATE] = pd.to_datetime(df[['Year', 'Month', 'Day']])
df = df.set_index(DATE).sort_index().cumsum()
add_query_constants(df, queries[-1])
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_ga(res, mapping):
tagged = []
file_mapping = build_leveled_mapping(mapping)
with zipContextManager(res[0]) as zipdir:
for filename in file_mapping.keys():
date_fields = [k for k, v in file_mapping[filename].items() if v == 'TIMESTAMP']
df = pd.read_csv(os.path.join(zipdir, filename), parse_dates=date_fields)
df = df[df['county'] == 'Georgia']
by_date = file_mapping[filename].pop(DATE_USED)
df = df.rename(columns=file_mapping[filename])
df[DATE_USED] = by_date
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_il(res, mapping, queries):
df = res[0].rename(columns=mapping)
df[TS] = df[DATE]
add_query_constants(df, queries[0])
mapped = df.to_dict(orient='records')
# testing
df = pd.DataFrame(res[1].get('test_group_counts')).rename(columns=mapping)
df = df[df['regionID'] == 0]
df[DATE] = pd.to_datetime(df[DATE])
df = df.set_index(DATE).sort_index().cumsum()
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_in(res, mapping):
tagged = []
df = prep_df(res[0]['result']['records'], mapping).sort_index().cumsum()
# need to assign dating correctly
assignments = [
('SPECIMENS', 'Specimen Collection'),
('POSITIVE_BY_SPECIMEN', 'Specimen Collection'),
(['POSITIVE', 'TOTAL'], 'Report'),
('DEATH', 'Death'),
]
for key, by_date in assignments:
if isinstance(key, list):
subset = df.filter(key)
else:
subset = df.filter(like=key)
if subset.columns[0] == 'POSITIVE_BY_SPECIMEN':
subset.columns = ['POSITIVE']
subset[DATE_USED] = by_date
subset[TS] = subset.index
tagged.extend(subset.to_dict(orient='records'))
return tagged
def handle_ks(res, mapping, queries):
testing = res[0][0].filter(like='alias')
testing.columns = [c.replace('-alias', '') for c in testing.columns]
testing = testing.rename(columns=mapping).groupby(DATE).last()
testing.index = pd.to_datetime(testing.index)
testing[TS] = testing.index
add_query_constants(testing, queries[0])
return testing.to_dict(orient='records')
def handle_la(res, mapping):
df = res[0].rename(columns=mapping).groupby(DATE).sum()
df = df.sort_index().cumsum()
df[TS] = df.index
df[DATE_USED] = 'Specimen Collection'
return df.to_dict(orient='records')
def handle_ma(res, mapping):
'''Returning a list of dictionaries (records)
'''
tagged = []
# break the mapping to {file -> {mapping}}
# not the most efficient, but the data is tiny
tab_mapping = build_leveled_mapping(mapping)
tabs = MaRawData(res[0])
for tabname in tab_mapping.keys():
df = tabs[tabname].rename(columns=tab_mapping[tabname])
df[DATE] = pd.to_datetime(df[DATE])
# expect it to always exist (we control the file list)
by_date = tab_mapping[tabname].pop(DATE_USED)
df = df[tab_mapping[tabname].values()]
# need to cumsum TestingByDate file
if tabname.startswith('TestingByDate'):
df = df.set_index(DATE).cumsum()
df[DATE] = df.index
df[DATE_USED] = by_date
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_md(res, mapping, queries):
mapped = extract_arcgis_attributes(res[0], mapping, 'MD')
for x in mapped:
x[DATE_USED] = 'Report'
for i, result in enumerate(res[1:]):
data = extract_arcgis_attributes(result, mapping, 'MD')
cumsum_df = make_cumsum_df(data)
add_query_constants(cumsum_df, queries[i+1])
mapped.extend(cumsum_df.to_dict(orient='records'))
return mapped
def handle_me(res, mapping, queries):
cases = res[0].rename(columns=mapping).groupby(DATE).sum().sort_index().cumsum()
cases[TS] = cases.index
mapped = cases.to_dict(orient='records')
df = res[1].rename(columns=mapping).set_index(DATE)
df['positive'] = df['Positive Tests'].fillna(0) + df['Positive Tests Flexible'].fillna(0)
df = df.pivot(columns='Type', values=['All Tests', 'positive'])
df.columns = df.columns.map(lambda x: "{}-{}".format(x[0], x[1]))
df = df.cumsum().rename(columns=mapping)
add_query_constants(df, queries[1])
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_mi(res, mapping):
soup = res[-1]
h = soup.find("h5", string=re.compile('[dD][aA][tT][aA]'))
parent = h.find_parent("ul")
links = parent.find_all("a")
base_url = 'https://www.michigan.gov'
cases_url = base_url + links[1]['href']
tests_url = base_url + links[4]['href']
tagged = []
# cases:
df = pd.read_excel(cases_url, engine='xlrd', parse_dates=['Date'])
df = df.groupby(['Date', 'CASE_STATUS']).sum().filter(like='Cumulative').unstack()
df.columns = df.columns.map("-".join)
like_bydate = [('death', 'Death'), ('Cases', 'Symptom Onset')]
for like, by_date in like_bydate:
foo = df.filter(like=like).rename(columns=mapping)
foo[TS] = foo.index
foo[DATE_USED] = by_date
tagged.extend(foo.to_dict(orient='records'))
# tests
df = pd.read_excel(tests_url, engine='xlrd', parse_dates=['MessageDate'])
df = df.groupby('MessageDate').sum().sort_index().cumsum().rename(columns=mapping)
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_mn(res, mapping):
mapped = []
# Soup time. Yummy!
page = res[0]
table_ids = {
'labtable': 'n/a',
'casetable': 'Specimen Collection'
}
for table_id, date_used in table_ids.items():
table = page.find("table", id=table_id)
if not table:
continue
# map table headers to fields
headers = table.find_all('th')
headers = [h.get_text(strip=True) for h in headers]
headers = [mapping.get(h, '') for h in headers]
for tr in table.find_all('tr'):
tds = tr.find_all('td')
if not tds:
# expected for 1st line
continue
td_text = [td.get_text(strip=True) for td in tds]
values = dict(zip(headers, td_text))
if values[DATE].lower().find('unknown') >= 0:
# ignore unknown date for now
continue
row = {}
for k, v in values.items():
if not k:
continue
if k != DATE:
v = atoi(v) if v != '-' else None
row[k] = v
row[DATE_USED] = date_used
mapped.append(row)
return mapped
def handle_mo(res, mapping, queries):
dfs = [
# result index, date index
(res[0], 'Date Reported'),
(res[1], 'Test Date')
]
mapped = []
for i, (mo, date_index) in enumerate(dfs):
mo = mo.pivot(columns='Measure Names', values='Measure Values', index=date_index)
mo = mo.iloc[:-1]
mo[TS] = pd.to_datetime(mo.index)
mo = mo.set_index(TS).sort_index(na_position='first')
dates = pd.date_range(end=mo.index.max(), periods=len(mo.index), freq='d')
mo.index = dates
mo = mo.cumsum().rename(columns=mapping)
add_query_constants(mo, queries[i])
mo[TS] = mo.index
mapped.extend(mo.to_dict(orient='records'))
# death by day of death
df = res[2].rename(columns={'Measure Values': 'DEATH'}).rename(columns=mapping)
df = df[df[TS] != 'All']
df[TS] = pd.to_datetime(df[TS])
# There are dates that are around ~1940, we aggregate all of them to "before 2020"
df = df.set_index(TS)[['DEATH']].groupby(
by=lambda x: x if x >= datetime(2020, 1, 1) else datetime(2020, 1, 1)) \
.sum().sort_index(na_position='first').cumsum()
dates = pd.date_range(start=df.index.min(), end=df.index.max(), freq='d')
df = df.reindex(dates).ffill()
df[TS] = df.index
add_query_constants(df, queries[2])
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_nc(res, mapping):
tagged = []
for x in res:
tot = x.rename(columns=mapping)
tot = tot.pivot(
columns='Measure Names', values='Measure Values', index=DATE).sort_index()
for c in tot.columns:
tag, dating = mapping.get(c, ":").split(":")
if not tag:
continue
df = pd.DataFrame(tot[c].rename(tag)).fillna(0).sort_index().cumsum()
df[TS] = df.index
df[DATE_USED] = dating
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_nd(res, mapping):
# simply a cumsum table
res = res[0].rename(columns=mapping)
res = res.groupby(DATE).sum().filter(mapping.values()).cumsum()
res[DATE] = res.index
records = res.to_dict(orient='records')
return records
def handle_nh(res, mapping, queries):
mapped = []
# 1st element: non-cumulative cases
df = res[0].rename(columns=mapping).set_index(TS).sort_index().cumsum()
df[TS] = df.index
add_query_constants(df, queries[0])
mapped.extend(df.to_dict(orient='records'))
for df, query in zip(res[1:], queries[1:]):
df = df.rename(columns=mapping)
add_query_constants(df, query)
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_nv(res, mapping):
mapped = []
nv = res[0]
tab_mapping = build_leveled_mapping(mapping)
for tab in tab_mapping.keys():
df = nv[tab]
date_used = tab_mapping[tab].pop(DATE_USED)
df.columns = df.iloc[1]
df = df.iloc[2:].rename(columns=tab_mapping[tab]).filter(tab_mapping[tab].values())
df[DATE] = pd.to_datetime(df[DATE], errors='coerce')
df = df[df[DATE].notna()].set_index(DATE).sort_index()
if tab == 'Cases':
df = df.cumsum()
df[DATE_USED] = date_used
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_oh(res, mapping):
testing_url = res[0]['url']
df = pd.read_csv(testing_url, parse_dates=['Date'])
df = df.set_index('Date').sort_index().cumsum().rename(columns=mapping)
df[TS] = df.index
df[DATE_USED] = 'Test Result'
tagged = df.to_dict(orient='records')
oh = res[1].iloc[:-1]
oh['Case Count'] = pd.to_numeric(oh['Case Count'])
for x in ['Onset Date', 'Date Of Death']:
oh[x] = pd.to_datetime(oh[x], errors='coerce')
# death
death = oh.groupby('Date Of Death').sum().filter(
like='Death').sort_index().cumsum().rename(columns=mapping)
death[TS] = death.index
death[DATE_USED] = 'Death'
tagged.extend(death.to_dict(orient='records'))
# cases
cases = oh.groupby('Onset Date').sum().filter(
like='Case').sort_index().cumsum().rename(columns=mapping)
cases[TS] = cases.index
cases[DATE_USED] = 'Symptom Onset'
tagged.extend(cases.to_dict(orient='records'))
return tagged
def handle_or(res, mapping, queries):
testing = res[0][0].rename(columns=mapping)
testing[DATE] = pd.to_datetime(testing[DATE], errors='coerce')
testing = testing[testing[DATE].notna()]
testing = testing.pivot(
index=DATE, columns='Test Result-alias', values='SUM(ELR Count)-alias'
).rename(columns=mapping).sort_index().cumsum()
testing[TS] = testing.index
add_query_constants(testing, queries[0])
return testing.to_dict(orient='records')
def handle_pa(res, mapping, queries):
tagged = []
for i, data in enumerate(res):
df = pd.DataFrame(data).rename(columns=mapping).set_index(DATE).sort_index()
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import json
import os
import pandas as pd
import sklearn.datasets
def data(dataset="bio_eventrelated_100hz"):
"""Download example datasets.
Download and load available `example datasets <https://github.com/neuropsychology/NeuroKit/tree/master/data#datasets>`_.
Note that an internet connexion is necessary.
Parameters
----------
dataset : str
The name of the dataset. The list and description is
available `here <https://neurokit2.readthedocs.io/en/master/datasets.html#>`_.
Returns
-------
DataFrame
The data.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> data = nk.data("bio_eventrelated_100hz")
"""
    # TODO: one could further improve this function with selectors such as
    # 'ecg=True, eda=True, restingstate=True' that would find the most
    # appropriate dataset
dataset = dataset.lower()
path = "https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/"
# Specific requests
if dataset == "iris":
data = sklearn.datasets.load_iris()
return pd.DataFrame(data.data, columns=data["feature_names"])
if dataset in ["eeg", "eeg.txt"]:
return pd.read_csv(path + "eeg.txt").values[:, 0]
# Add extension
if dataset in ["bio_resting_8min_200hz"]:
dataset += ".json"
# Specific case for json file
if dataset.endswith(".json"):
if "https" not in dataset:
data = pd.read_json(path + dataset, orient="index")
else:
data = | pd.read_json(dataset, orient="index") | pandas.read_json |
from itertools import combinations
import pandas as pd
import numpy as np
import scipy.stats as stats
import random
from diffex import constants
IUPHAR_Channels_names = constants.IUPHAR_Channels_names
def clean_dataframe(df):
""" Return a cleaned dataframe with NaN rows removed and duplicate
fold change measurements averaged """
# Select all rows from the df that don't have NA
clean_df = df.loc[df['Gene name'].notnull(), :]
# Select only rows with Gene names that are duplicated
dup_df = clean_df[clean_df.duplicated(subset='Gene name',keep=False)]
dup_df = dup_df.sort_values(by=['Gene name'])
try: # won't work if no duplicates to average
# Average duplicate fold change measurements
dup_df = dup_df.groupby('Gene name',as_index=False).mean()
dup_df = dup_df.round(3)
except:
print(f'No duplicated gene names in dataset for df with column 1: {df.columns[1]}')
pass
# Drop rows from the original dataframe that are in the duplicate df
cond = clean_df['Gene name'].isin(dup_df['Gene name'])
clean_df.drop(clean_df[cond].index, inplace = True)
clean_df = clean_df.append(dup_df)
clean_df = clean_df.reset_index(drop=True)
del dup_df
return clean_df
def get_combinations(names_list, k):
""" Return a list of unique combinations (each element is a tuple)
from the names_list """
return list(combinations(names_list, k))
def find_pairwise_overlaps(dfs_dict):
""" Return a dataframe with a column holding the overlapping
set of 'Gene name's for each unique pair of models in the
dfs_dict """
model_pairs = get_combinations(names_list=list(dfs_dict.keys()), k=2)
overlaps_dict = {}
for combi_tuple in model_pairs:
# create a name to be used for this combination's
# dataframe column
combi_name = '-'.join(combi_tuple)
# find overlap between the two model gene name columns
df_1 = dfs_dict[combi_tuple[0]]
df_2 = dfs_dict[combi_tuple[1]]
overlap_df = pd.merge(df_1, df_2, on='Gene name')
overlaps_dict[combi_name] = overlap_df['Gene name']
overlaps_df = pd.DataFrame(overlaps_dict)
return overlaps_df
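# Minimal usage sketch for find_pairwise_overlaps; the model names and gene
# lists below are invented. Each output column is named "<model1>-<model2>".
def _demo_find_pairwise_overlaps():
    dfs_dict = {
        "modelA": pd.DataFrame({"Gene name": ["KCNQ1", "SCN1A", "CACNA1C"]}),
        "modelB": pd.DataFrame({"Gene name": ["SCN1A", "CACNA1C", "HCN1"]}),
    }
    # Returns a frame with one column "modelA-modelB" holding SCN1A and CACNA1C.
    return find_pairwise_overlaps(dfs_dict)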
def find_triplet_overlaps(dfs_dict):
""" Return a dataframe with a column holding the overlapping
    set of 'Gene name's for each unique group of three
models in the dfs_dict """
model_trips = get_combinations(names_list=list(dfs_dict.keys()), k=3)
overlaps_dict = {}
for combi_tuple in model_trips:
# create a name to be used for this combination's
# dataframe column
combi_name = '-'.join(combi_tuple)
# find overlap between the two model gene name columns
df_1 = dfs_dict[combi_tuple[0]]
df_2 = dfs_dict[combi_tuple[1]]
df_3 = dfs_dict[combi_tuple[2]]
overlap_df = | pd.merge(df_1, df_2, on='Gene name') | pandas.merge |
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
data_dia_todos=data[data["FECHA"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_3(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_fecha["CANTIDAD"].sum(),data_fecha["P NETO"].sum(),round(data_fecha["TRM"].mean(),2),round(data_fecha["PRECIO PONDERADO"].mean(),2)]],
columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def Mes_espa(mes):
if mes =="01":
Mes="Enero"
elif mes =="02":
Mes="Febrero"
elif mes =="03":
Mes="Marzo"
elif mes =="04":
Mes="Abril"
elif mes =="05":
Mes="Mayo"
elif mes =="06":
Mes="Junio"
elif mes =="07":
Mes="Julio"
elif mes =="08":
Mes="Agosto"
elif mes =="09":
Mes="Septiembre"
elif mes =="10":
Mes="Octubre"
elif mes =="11":
Mes="Noviembre"
elif mes =="12":
Mes="Diciembre"
return Mes
def F_Liq_pag(mes,ano):
if mes%12 ==1:
Fecha ="Enero"
elif mes%12 ==2:
Fecha ="Febrero"
elif mes%12 ==3:
Fecha ="Marzo"
elif mes%12 ==4:
Fecha ="Abril"
elif mes%12 ==5:
Fecha ="Mayo"
elif mes%12 ==6:
Fecha ="Junio"
elif mes%12 ==7:
Fecha ="Julio"
elif mes%12 ==8:
Fecha="Agosto"
elif mes%12 ==9:
Fecha="Septiembre"
elif mes%12 ==10:
Fecha="Octubre"
elif mes%12 ==11:
Fecha="Noviembre"
elif mes%12 ==0:
Fecha="Diciembre"
if mes > 12:
Fecha += " "+ str(ano+1)
else:
Fecha += " "+ str(ano)
return Fecha
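# Minimal sketch of F_Liq_pag's year rollover: a month index above 12 wraps to
# the following year. The arguments below are illustrative only.
def _demo_F_Liq_pag():
    return F_Liq_pag(13, 2021) # -> "Enero 2022" (month 13 rolls into the next year)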
def num2money(num):
if num < 1e3:
return str(round(num,2))
elif num < 1e6:
return str(round(num*1e3/1e6,2))+ " miles."
elif num < 1e9:
return str(round(num*1e3/1e9,2))+ " mill."
elif num < 1e12:
return str(round(num*1e3/1e12,2))+ " mil mill."
def mes_espa(mes):
if mes =="01":
Mes="enero"
elif mes =="02":
Mes="febrero"
elif mes =="03":
Mes="marzo"
elif mes =="04":
Mes="abril"
elif mes =="05":
Mes="mayo"
elif mes =="06":
Mes="junio"
elif mes =="07":
Mes="julio"
elif mes =="08":
Mes="agosto"
elif mes =="09":
Mes="septiembre"
elif mes =="10":
Mes="octubre"
elif mes =="11":
Mes="noviembre"
elif mes =="12":
Mes="diciembre"
return Mes
def mes_num(mes):
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
if mes == Opciones2[0]:
Mes="01"
elif mes == Opciones2[1]:
Mes="02"
elif mes == Opciones2[2]:
Mes="03"
elif mes == Opciones2[3]:
Mes="04"
elif mes == Opciones2[4]:
Mes="05"
elif mes == Opciones2[5]:
Mes="06"
elif mes == Opciones2[6]:
Mes="07"
elif mes == Opciones2[7]:
Mes="08"
elif mes == Opciones2[8]:
Mes="09"
elif mes == Opciones2[9]:
Mes="10"
elif mes == Opciones2[10]:
Mes="11"
elif mes == Opciones2[11]:
Mes="12"
return Mes
def dia_esp(dia):
if dia =="01":
Dia="1"
elif dia =="02":
Dia="2"
elif dia =="03":
Dia="3"
elif dia =="04":
Dia="4"
elif dia =="05":
Dia="5"
elif dia =="06":
Dia="6"
elif dia =="07":
Dia="7"
elif dia =="08":
Dia="8"
elif dia =="09":
Dia="9"
else :
Dia = dia
return Dia
def set_font(rows,fila,col,size):
run=rows[fila].cells[col].paragraphs[0].runs
font = run[0].font
font.size= Pt(size)
font.name = 'Tahoma'
def replace_text_for_image(paragraph, key, value,wid,hei):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, "")
for val in value:
r = paragraph.add_run()
r.add_picture(val,width=Cm(wid), height=Cm(hei))
def replace_text_in_paragraph(paragraph, key, value):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, value)
def delete_columns(table, columns):
# sort columns descending
columns.sort(reverse=True)
grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
for ci in columns:
for cell in table.column_cells(ci):
cell._tc.getparent().remove(cell._tc)
# Delete column reference.
col_elem = grid[ci]
grid.remove(col_elem)
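# Minimal usage sketch for the python-docx helpers above; the file name is
# hypothetical and "${FECHA}" simply mirrors the placeholder style used in the
# templates handled further down.
def _demo_fill_template(path="plantilla_demo.docx"):
    doc = docx.Document(path)
    for paragraph in doc.paragraphs:
        replace_text_in_paragraph(paragraph, "${FECHA}", "1 de enero de 2022")
    if doc.tables and len(doc.tables[0].rows) > 1:
        # Drop the first column and the last row of the first table.
        delete_columns(doc.tables[0], [0])
        remove_row(doc.tables[0], doc.tables[0].rows[-1])
    return doc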
st.set_page_config(
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="JULIA RD", # String or None. Strings get appended with "• Streamlit".
page_icon="📊", # String, anything supported by st.image, or None.
)
if User_validation():
#if True:
Opciones1=("Oferta Firme de Respaldo","Certificado de Reintegros","Informe Comercial")
eleccion=st.sidebar.selectbox('Seleccione el proyecto',Opciones1)
#if False:
if eleccion==Opciones1[0]:
st.header("Creación ofertas firmes de respaldo")
st.subheader("Introducción de los documentos")
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el consolidado base")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["Fecha"]=data["FECHAINI"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
P_bolsa=st.text_input("Introduzca el Precio de Escasez de Activación",value="10.00")
P_contrato=st.text_input("Introduzca el precio del contrato [USD]",value="10.00")
P_TMR=st.text_input("Introduzca el valor de la TRM",value="3,950.00")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
Agente_extra = st.text_input("Introduzca el nombre particular del agente")
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
Opciones3=("I","II","III","IV","V")
with columns_2[1]:
eleccion2=st.selectbox('Seleccione el mes de la OFR',Opciones2)
with columns_2[2]:
eleccion3=st.selectbox('Selecciona la semana de la OFR',Opciones3)
if Agente_extra:
Agente_extra="_"+Agente_extra
else:
Agente_extra=""
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/OFR/"+str(today.year)+"/"+mes_num(eleccion2)+"-"+eleccion2 +"/"+ eleccion3
Ruta_x="Documentos_exportar/"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = pd.unique(data_user["agente1"])
Respaldo = data[data["USUARIO"]== usuario]["CANTIDAD"].sum()
Fechas = pd.unique(data_user["Fecha"])
R_fechas = Range_fecha(Fechas)
Data_frame_fechas=dt_fechas(data.copy(),data_user,Fechas,Tipo_dia)
try:
Email = str(Extras[Extras["USUARIO"] == usuario]["CORREO"].values)
Porc_come = Extras[Extras["USUARIO"] == usuario]["MARGEN"].values[0]
except:
Email = ""
Porc_come = 0.1
st.warning("No hay coincidencia en el Excel de usuarios para: "+usuario)
Email = re.sub("\[|\]|\'|0","",Email)
tx_empresas=""
for idx ,val in enumerate(Empresas):
if len(Empresas)<4:
val_2=val[0:3]
tx_empresas += val_2
if idx==len(Empresas)-1:
pass
else:
tx_empresas +=", "
else:
tx_empresas += "Los Generadores"
P_kwh=float(re.sub(",","",P_TMR))*float(P_contrato)/1000
Ingreso=int(P_kwh*Respaldo)
C_comer=int(Ingreso*Porc_come)
C_GMS=int(Ingreso*4/1000)
I_NETO=Ingreso-C_comer-C_GMS
if len(Data_frame_fechas.index.values)>13:
Enter="\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
else:
Enter=""
variables = {
"${FECHA}": fecha,
"${MES}": eleccion2,
"${AGENTES}": tx_empresas,
"${USUARIO}": usuario,
"${PRECIO_BOLSA}": P_bolsa,
"${PRECIO_CONTRATO}": P_contrato,
"${FECHA_TRM}": F_TRM,
"${PRECIO_TRM}": P_TMR,
"${EMAIL_USUARIO}": Email,
"${PRECIO_PKWH}":str(round(P_kwh,2)),
"${PORC_COMER}":str(int(Porc_come*100))+"%",
"${RESPALDO_TOT}":f'{Respaldo:,}',
"${INGRESO}":f'{Ingreso:,}',
"${COST_COME}":f'{C_comer:,}',
"${COST_GMS}":f'{C_GMS:,}',
"${INGRESO_NETO}": f'{I_NETO:,}',
"${NUM_DIAS}":Num_dias(len(Fechas)),
"${RANGO_FECHAS_1}": R_fechas,
"${ENTER}": Enter,
"${MES_LIQUIDACION}": F_Liq_pag(Opciones2.index(eleccion2)+2,int(today.strftime("%Y"))),
"${MES_PAGO}": F_Liq_pag(Opciones2.index(eleccion2)+3,int(today.strftime("%Y"))),
"${INDICADOR}": eleccion3
}
template_document = docx.Document(template_file_path)
for variable_key, variable_value in variables.items():
for section in template_document.sections:
for paragraph in section.header.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for paragraph in template_document.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for table in template_document.tables:
for col in table.columns:
for cell in col.cells:
for paragraph in cell.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
rows = template_document.tables[1].rows
index_1=Data_frame_fechas.index.values
Acum_Req=0
Acum_Res=0
for idx in index_1:
rows[int(idx)+1].cells[0].text = Data_frame_fechas.iloc[idx]["Dia"]
rows[int(idx)+1].cells[1].text = Data_frame_fechas.iloc[idx]["Fecha"].strftime('%Y-%m-%d')
rows[int(idx)+1].cells[2].text = f'{Data_frame_fechas.iloc[idx]["Requerimiento"]:,}'
Acum_Req += Data_frame_fechas.iloc[idx]["Requerimiento"]
rows[int(idx)+1].cells[3].text = f'{Data_frame_fechas.iloc[idx]["Respaldo"]:,}'
Acum_Res += Data_frame_fechas.iloc[idx]["Respaldo"]
for idx_2 in range(0,4):
run=rows[int(idx)+1].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
for idx in np.arange(len(index_1)+1,37):
remove_row(template_document.tables[1], rows[len(index_1)+1])
rows[-1].cells[1].text = Num_dias(len(Fechas))
rows[-1].cells[2].text = f'{Acum_Req:,}'
rows[-1].cells[3].text = f'{Acum_Res:,}'
version=1
template_document.save(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
zf.write(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
if b:
docx2pdf.convert(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx",
Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
zf.write(Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
File_names.extend([usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx"])
steps_done += 1
my_bar.progress(int(steps_done*100/steps))
Info.update({"File_names":File_names})
json_info = json.dumps(Info, indent = 4)
with open(Ruta_x+'/00_data.json', 'w') as f:
json.dump(json_info, f)
zf.write(Ruta_x+'/00_data.json')
zf.close()
with open("Resultado.zip", "rb") as fp:
with columns_3[1]:
btn = st.download_button(
label="Descargar resultados",
data=fp,
file_name="Resultado.zip",
mime="application/zip"
)
else:
st.warning("Necesita subir los tres archivos")
#elif False:
elif eleccion==Opciones1[1]:
st.header("Creación certificados de reintegros")
st.subheader("Introducción de los documentos")
if True:
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el documento de liquidación")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
else:
uploaded_file_1="Liquidacion_base.xlsm"
uploaded_file_2="Certificado_base.docx"
uploaded_file_3="Excel_extra_certificados.xls"
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
Agentes=pd.read_excel(uploaded_file_3,sheet_name="Agentes")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["FECHA"]=data["FECHA"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
P_TMR=str(round(data["TRM"].mean(),2))
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
with columns_2[1]:
eleccion3=st.number_input('Seleccione el año del cerficado',value=today.year)
with columns_2[2]:
eleccion2=st.selectbox('Seleccione el mes del cerficado',Opciones2)
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/Certificados/"+str(eleccion3) +"/"+ mes_num(eleccion2)+"-"+eleccion2
Ruta_x="Documentos_exportar"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = pd.unique(data_user["COMPRADOR"])
Respaldo = data[data["USUARIO"]== usuario]["CANTIDAD"].sum()
Fechas = pd.unique(data_user["FECHA"])
R_fechas = Range_fecha(Fechas)
Data_frame_fechas=dt_fechas_2(data.copy(),data_user,Fechas,Tipo_dia)
try:
Email = str(Extras[Extras["USUARIO"] == usuario]["CORREO"].values)
Porc_come = Extras[Extras["USUARIO"] == usuario]["MARGEN"].values[0]
except:
Email = ""
Porc_come = 0.1
st.warning("No hay coincidencia en el Excel de usuarios para: "+usuario)
Email = re.sub("\[|\]|\'|0","",Email)
# tx_empresas=""
# for idx ,val in enumerate(Empresas):
# if len(Empresas)<4:
# val_2=val[0:3]
# tx_empresas += val_2
# if idx==len(Empresas)-1:
# pass
# else:
# tx_empresas +=", "
# else:
# tx_empresas += "Los Generadores"
if len(Data_frame_fechas.index.values)>13:
Enter="\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
else:
Enter=""
REQ_MAXIMO=Data_frame_fechas["Respaldo"].max()
Valor_total=int(round(data_user["P NETO"].sum()))
variables = {
"${FECHA}": fecha,
"${MES}": eleccion2,
"${ANO}": str(eleccion3),
# "${AGENTES}": tx_empresas,
"${USUARIO}": usuario,
"${OFERTA_MAX}": f'{REQ_MAXIMO:,}',
"${FECHA_TRM}": F_TRM,
"${P_TRM}": P_TMR,
"${EMAIL_USUARIO}": Email,
"${PORC_COMER}":str(int(Porc_come*100))+"%",
"${RESPALDO_TOT}":"$ "+f'{Valor_total:,}',
"${NUM_DIAS}":Num_dias(len(Fechas)),
"${RANGO_FECHAS_1}": R_fechas,
"${ENTER}": Enter,
"${MES_LIQUIDACION}": F_Liq_pag(Opciones2.index(eleccion2)+2,int(today.strftime("%Y"))),
"${MES_PAGO}": F_Liq_pag(Opciones2.index(eleccion2)+3,int(today.strftime("%Y"))),
"${INDICADOR}": eleccion3
}
template_document = docx.Document(template_file_path)
for variable_key, variable_value in variables.items():
for section in template_document.sections:
for paragraph in section.header.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for paragraph in template_document.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for table in template_document.tables:
for col in table.columns:
for cell in col.cells:
for paragraph in cell.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
rows = template_document.tables[1].rows
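# tables[1]: rows idx+3 hold the per-agent price lines and rows idx+15 the per-agent backup (kWh) lines; totals are written later into rows 22, 24, 25, 26 and 28.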
index_1=Agentes.index.values
Total_ingreso=0
Total_respaldo=0
dt_participa=pd.DataFrame(columns=["OFR","INGRESO","FILA_1","FILA_2"])
for idx in index_1:
run=rows[int(idx)+3].cells[0].paragraphs[0]
rows[int(idx)+3].cells[0].text = "Precio "+Agentes.iloc[idx]["AGENTE"]
rows[int(idx)+3].cells[1].text = f'{Agentes.iloc[idx]["PRECIO"]:,}'
rows[int(idx)+3].cells[2].text = f'{round(float(re.sub(",","",P_TMR))*Agentes.iloc[idx]["PRECIO"]/1000,2):,}'
Respaldo=data_user[data_user["OFR"]==Agentes.iloc[idx]["OFR"]]["CANTIDAD"].sum()
rows[int(idx)+15].cells[0].text = "Respaldo "+Agentes.iloc[idx]["AGENTE"]
rows[int(idx)+15].cells[1].text = "kWh"
rows[int(idx)+15].cells[2].text = f'{Respaldo:,}'
for idx_2 in range(0,3):
run=rows[int(idx)+3].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
for idx_2 in range(0,3):
run=rows[int(idx)+15].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
if idx_2==0:
font.bold= True
Total_respaldo += Respaldo
df=pd.DataFrame([[Agentes.iloc[idx]["OFR"],Respaldo,int(idx)+3,int(idx)+15]],columns=["OFR","INGRESO","FILA_1","FILA_2"])
dt_participa=pd.concat([dt_participa, df], ignore_index=True)
Total_ingreso += Respaldo*float(re.sub(",","",P_TMR))*Agentes.iloc[idx]["PRECIO"]/1000
filas=[22,24,25,26,28]
valores=[Total_respaldo,
Total_ingreso,
Total_ingreso*Porc_come,
Total_ingreso*0.004,
Valor_total]
for idx, val in enumerate(filas):
rows[val].cells[2].text = f'{int(round(valores[idx])):,}'
run=rows[val].cells[2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
contador1=0
contador2=0
for idx in list(dt_participa.index):
if dt_participa.iloc[idx]["INGRESO"]==0:
remove_row(template_document.tables[1], rows[dt_participa.iloc[idx]["FILA_1"]+contador1])
contador1 -= 1
remove_row(template_document.tables[1], rows[dt_participa.iloc[idx]["FILA_2"]+contador1+contador2])
contador2 -= 1
rows = template_document.tables[2].rows
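# tables[2]: one row per date with its requirement and backup; unused template rows are removed afterwards and the last row receives the accumulated totals.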
index_1=Data_frame_fechas.index.values
Acum_Req=0
Acum_Res=0
for idx in index_1:
rows[int(idx)+1].cells[0].text = Data_frame_fechas.iloc[idx]["Fecha"].strftime('%Y-%m-%d')
rows[int(idx)+1].cells[1].text = Data_frame_fechas.iloc[idx]["Dia"]
rows[int(idx)+1].cells[2].text = f'{Data_frame_fechas.iloc[idx]["Requerimiento"]:,}'
Acum_Req += Data_frame_fechas.iloc[idx]["Requerimiento"]
rows[int(idx)+1].cells[3].text = f'{Data_frame_fechas.iloc[idx]["Respaldo"]:,}'
Acum_Res += Data_frame_fechas.iloc[idx]["Respaldo"]
for idx_2 in range(0,4):
run=rows[int(idx)+1].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
for idx in np.arange(len(index_1)+1,37):
remove_row(template_document.tables[2], rows[len(index_1)+1])
#rows[-1].cells[1].text = Num_dias(len(Fechas))
rows[-1].cells[2].text = f'{Acum_Req:,}'
rows[-1].cells[3].text = f'{Acum_Res:,}'
for idx_2 in range(1,4):
run=rows[-1].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
# version=1
name_word=usuario+"_Certificado_Reintegros_"+eleccion2+"_"+str(eleccion3)+".docx"
name_pdf=usuario+"_Certificado_Reintegros_"+eleccion2+"_"+str(eleccion3)+".pdf"
template_document.save(Ruta_x+name_word)
zf.write(Ruta_x+name_word)
if b:
docx2pdf.convert(Ruta_x+name_word, Ruta_x+name_pdf)
zf.write(Ruta_x+name_pdf)
File_names.extend([name_word])
steps_done += 1
my_bar.progress(int(steps_done*100/steps))
Info.update({"File_names":File_names})
with open(Ruta_x+'00_data.json', 'w') as f:
json.dump(Info, f, indent=4)
zf.write(Ruta_x+'00_data.json')
zf.close()
with open("Resultado.zip", "rb") as fp:
with columns_3[1]:
btn = st.download_button(
label="Descargar resultados",
data=fp,
file_name="Resultado.zip",
mime="application/zip"
)
else:
st.warning("Necesita subir los tres archivos")
#elif True:
elif eleccion==Opciones1[2]:
st.header("Creación consolidados mensuales")
st.subheader("Introducción de los documentos")
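# 'if True' keeps the upload widgets active; the else branch below points to local files, which is handy for testing.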
if True:
colums= st.columns([1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el documento de base principal")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
# with colums[2]:
# uploaded_file_3 = st.file_uploader("Suba el excel adicional")
else:
uploaded_file_1="Excel_base_informe.xls"
uploaded_file_2="Plantilla_base.docx"
# uploaded_file_3="Excel_extra_certificados.xls"
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None): #and (uploaded_file_3 is not None):
try:
excel_1= | pd.ExcelFile(uploaded_file_1) | pandas.ExcelFile |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
# df_2018 = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
# df_2016 = pd.read_csv("/mnt/nadavrap-students/STS/data/2016_2017.csv")
# df_2014 = pd.read_csv("/mnt/nadavrap-students/STS/data/2014_2015.csv")
# df_2012 = pd.read_csv("/mnt/nadavrap-students/STS/data/2012_2013.csv")
# df_2010 = pd.read_csv("/mnt/nadavrap-students/STS/data/2010_2011.csv")
#
# print (df_2018.stsrcom.unique())
# print (df_2016.stsrcom.unique())
# print (df_2014.stsrcom.unique())
# print (df_2012.stsrcom.unique())
# print (df_2010.stsrcom.unique())
# print (df_2018.stsrcHospD.unique())
# print (df_2016.stsrcHospD.unique())
# print (df_2014.stsrcHospD.unique())
# print (df_2012.stsrcHospD.unique())
# print (df_2010.stsrcHospD.unique())
# # print (df_2018.columns.tolist())
# df_union = pd.concat([df_2010, df_2012,df_2014,df_2016,df_2018], ignore_index=True)
# print (df_union)
# print (df_union['surgyear'].value_counts())
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
# df_union= pd.read_csv("df_union.csv")
# cols_to_remove = []
# samples = len(df_union)
# for col in df_union.columns:
# nan_vals = df_union[col].isna().sum()
# prec_missing_vals = nan_vals / samples
# print("Column '{}' have :: {} missing values. {}%".format(col, df_union[col].isna().sum(), round(prec_missing_vals,3)))
# print (cols_to_remove)
#
# df_union.drop(cols_to_remove, axis=1, inplace=True)
# print("Number of Features : ",len(df_union.columns))
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
#
# df_union.to_csv("df union after remove.csv")
# df_2018_ = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
df_all= pd.read_csv("/tmp/pycharm_project_723/df_union.csv")
print (df_all.reoperation.unique())
print (df_all.stsrcHospD.unique())
print (df_all.stsrcom.unique())
# mask = df_2018_['surgyear'] == 2018
# df_all = df_2018_[mask]
# mask_reop = df_all['reoperation'] == 1
# df_reop = df_all[mask_reop]
# df_op = df_all[~mask_reop]
def create_df_for_bins_hospid(col_mort):
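# Builds a per-(hospid, surgyear) summary: operation counts (total, first, reop), mortality and
# complication counts and rates, and observed/expected ratios (log2-transformed), where
# `col_mort` names the mortality indicator column (e.g. 'stsrcHospD').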
df1 = df_all.groupby(['hospid', 'surgyear'])['hospid'].count().reset_index(name='total')
df2 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 1).sum()).reset_index(
name='Reop')
df3 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 0).sum()).reset_index(
name='FirstOperation')
df_aggr = pd.read_csv("aggregate_csv.csv")
mask_reop = df_all['reoperation'] == 1
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_all')
dfmortf = df_op.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_first')
dfmortr = df_reop.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_reop')
df_comp = df_all.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_all')
df_compr = df_reop.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_reop')
df_compf = df_op.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'],
how='inner') # how='left', on=['HospID','surgyear'])
del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d5 = pd.merge(d4, dfmortr, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d6 = pd.merge(d5, df_comp, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d7 = pd.merge(d6, df_compf, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d8 = pd.merge(d7, df_compr, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
# df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
d8.fillna(0, inplace=True)
d8['mort_rate_All'] = (d8['Mortality_all'] / d8['total']) * 100
d8['Mortality_First_rate'] = (d8['Mortality_first'] / d8['FirstOperation']) * 100
d8['Mortality_Reop_rate'] = (d8['Mortality_reop'] / d8['Reop']) * 100
d8['Complics_rate_All'] = (d8['Complics_all'] / d8['total']) * 100
d8['Complics_First_rate'] = (d8['Complics_FirstOperation'] / d8['FirstOperation']) * 100
d8['Complics_Reop_rate'] = (d8['Complics_reop'] / d8['Reop']) * 100
d8.to_csv('hospid_year_allyears.csv')
df_PredMort_all = df_all.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_All_avg')
df_PredMort_op = df_op.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(name='PredMort_First_avg')
df_PredMort_reop = df_reop.groupby(['hospid', 'surgyear'])['predmort'].mean().reset_index(
name='PredMort_Reoperation_avg')
df_PredComp_all = df_all.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_All_avg')
df_PredComp_op = df_op.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(name='PredComp_First_avg')
df_PredComp_reop = df_reop.groupby(['hospid', 'surgyear'])['predmm'].mean().reset_index(
name='PredComp_Reoperation_avg')
d19 = pd.merge(d8, df_PredMort_all, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d9 = pd.merge(d19, df_PredMort_op, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d10 = pd.merge(d9, df_PredMort_reop, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d14 = pd.merge(d10, df_PredComp_all, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d11 = pd.merge(d14, df_PredComp_op, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d12 = pd.merge(d11, df_PredComp_reop, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d12.fillna(0, inplace=True)
d12['Mort_observe/expected_All'] = (d12['mort_rate_All'] / d12['PredMort_All_avg'])
d12['Mort_observe/expected_First'] = (d12['Mortality_First_rate'] / d12['PredMort_First_avg'])
d12['Mort_observe/expected_Reop'] = (d12['Mortality_Reop_rate'] / d12['PredMort_Reoperation_avg'])
d12[['log_All_Mort', 'log_First_Mort', 'log_Reoperation_Mort']] = np.log2(
d12[['Mort_observe/expected_All', 'Mort_observe/expected_First', 'Mort_observe/expected_Reop']].replace(0,
np.nan))
d12.fillna(0, inplace=True)
d12['Comp_observe/expected_All'] = (d12['Complics_rate_All'] / d12['PredComp_All_avg'])
d12['Comp_observe/expected_First'] = (d12['Complics_First_rate'] / d12['PredComp_First_avg'])
d12['Comp_observe/expected_Reop'] = (d12['Complics_Reop_rate'] / d12['PredComp_Reoperation_avg'])
d12[['log_All_Comp', 'log_First_Comp', 'log_Reoperation_Comp']] = np.log2(
d12[['Comp_observe/expected_All', 'Comp_observe/expected_First', 'Comp_observe/expected_Reop']].replace(0,
np.nan))
d12.fillna(0, inplace=True)
d12.to_csv("hospid_allyears_expec_hospid_stsrcHospD.csv")
print(d12.info())
print(d12.columns.tolist())
#create_df_for_bins_hospid('stsrcHospD')
def create_df_for_bins_surgid(col_mort):
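# Same summary as create_df_for_bins_hospid, aggregated per (surgid, surgyear) instead of per hospital.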
df1 = df_all.groupby(['surgid', 'surgyear'])['surgid'].count().reset_index(name='total')
df2 = df_all.groupby(['surgid', 'surgyear'])['reoperation'].apply(lambda x: (x == 1).sum()).reset_index(
name='Reop')
df3 = df_all.groupby(['surgid', 'surgyear'])['reoperation'].apply(lambda x: (x == 0).sum()).reset_index(
name='FirstOperation')
df_aggr = pd.read_csv("/tmp/pycharm_project_723/aggregate_surgid_csv.csv")
mask_reop = df_all['reoperation'] == 1
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_all')
dfmortf = df_op.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_first')
dfmortr = df_reop.groupby(['surgid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_reop')
df_comp = df_all.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_all')
df_compr = df_reop.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_reop')
df_compf = df_op.groupby(['surgid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'],how='inner')
# del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d5 = pd.merge(d4, dfmortr, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer')
d6 = | pd.merge(d5, df_comp, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer') | pandas.merge |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to the original do not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring datetimelike-looking values when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = | Series([arg], dtype="datetime64[ns, CET]") | pandas.Series |
import os
import sys
import glob
import click
import pandas as pd
from .printing import splash_screen
from .prompt import main_menu_prompt
csv_sheets = []
xlsx_sheets = []
def get_csv_df(files):
for file in files:
csv_sheets.append(str(file))
yield pd.read_csv(file)
def get_xlsx_df(files):
for file in files:
xlsx_sheets.append(str(file))
yield | pd.read_excel(file) | pandas.read_excel |
#%% [markdown]
# # Lung Vasculature Analysis
# This notebook (.ipynb) is a working project for analyzing lung vasculature. It includes three parts:
# 1. converts skeleton analytical output (.xml) into .csv files.
# 2. calculates the length and average thickness of each segment.
# 3. makes two types of plots:
# 1. histogram of each dataset on length and thickness
# 2. average histogram on length and thickness (line plot with error bars)
#
#%%
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import os, sys, re, io
import numpy as np
import pandas as pd
from tqdm import tqdm
import time
from core.fileop import DirCheck, ListFiles
import core.mkplot as mkplot
#%% [markdown]
# ## Part 1:
# Converting skeleton analytical output (.xml) into .csv file.
# * Inputs: *.xml
# * Outputs: *.csv
# * Dependencies: xml, time, pandas, tqdm <br/>
#
# * The *.xml file includes three sheets: nodes, points, and segments.
# * Warning: the progress bar controlled by `tqdm` is not functioning well. It cannot overwrite itself and creates multiple lines.
#%% [markdown]
# ### Functions
#%%
# import dependencies
import xml.etree.ElementTree as etree
from core.msxml import MSXmlReader
# function
def convert_xml_csv(ippath, oppath):
filelist, fileabslist = ListFiles(ippath, extension='.xml')
for idx, f in enumerate(filelist):
filename = f.replace('.xml', '')
ip = os.path.join(ippath, f)
op = os.path.join(oppath, filename)
print(ip)
print(op)
# create path
if filename not in os.listdir(oppath):
DirCheck(op)
# convert *.xml to *.csv
csv_all = MSXmlReader(ip)
# save each spreadsheet into individual *.csv file
for key, value in csv_all.items():
oppath_tmp = os.path.join(op, key + '.csv')
value.to_csv(oppath_tmp, index = False)
#%% [markdown]
# ### Execution
# To run the code, please change `path` to the directory that hosts the raw data.
#%%
path = 'data'
ipdir = 'raw'
opdir = 'csv'
ippath = os.path.join(path, ipdir)
oppath = os.path.join(path, opdir)
# make dir
DirCheck(oppath)
# convert files in batch
convert_xml_csv(ippath, oppath)
#%% [markdown]
# ## Part 2:
# Calculating the length and average thickness of each segment.
# * Inputs: nodes.csv, points.csv, segments.csv
# * Outputs: segments_s.csv
#
# `SegStats` extracts the euclidean coordinates and thickness of each point, then calculates the total length and average thickness of each segment.
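#%% [markdown]
# The actual implementation lives in `core.filamentanalysis.SegStats` and is not shown here.
# As a rough, illustrative sketch only — the column names and point layout below are
# assumptions, not the real code — the segment length can be taken as the sum of euclidean
# distances between consecutive points, and the thickness as the mean per-point thickness:
#%%
# Hypothetical sketch of the per-segment statistics; the real SegStats may differ.
def segment_stats_sketch(df_points, point_ids):
    # assumes df_points has 'x', 'y', 'z' and 'thickness' columns indexed by point id
    pts = df_points.loc[point_ids, ['x', 'y', 'z']].to_numpy()          # ordered points of one segment
    length = np.sqrt(((pts[1:] - pts[:-1]) ** 2).sum(axis=1)).sum()     # sum of consecutive distances
    thickness = df_points.loc[point_ids, 'thickness'].mean()            # average radius along the segment
    return length, thickness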
#%% [markdown]
# ### Functions
#%%
# load dependencies
from core.filamentanalysis import SegStats, PNSCount
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipdir = 'csv'
ippath = os.path.join(path, ipdir)
img_group = []
# function
def stats_calculator(ippath, oppath):
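# For each image folder: compute per-segment statistics (segments_s.csv) and point/node/segment
# counts, then write a combined counts table annotated with genotype and treatment.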
imglist = [x for x in os.listdir(ippath) if not x.startswith('.')]
var = ['df_nodes', 'df_points', 'df_segments']
counts_combined = []
names= []
for img in imglist:
filelist, fileabslist = ListFiles(os.path.join(ippath, img), extension='.csv')
df_points = pd.read_csv(os.path.join(ippath, img, 'points.csv'))
df_segments = pd.read_csv(os.path.join(ippath, img, 'segments.csv'))
df_nodes = pd.read_csv(os.path.join(ippath, img,'nodes.csv'))
opfilename = 'segments_s.csv'
countfilename = 'count.csv'
countfilename_combined = 'counts_combined.csv'
if opfilename not in filelist:
df_segments_s = SegStats(df_points, df_segments)
df_segments_s.to_csv(os.path.join(oppath, img, opfilename), index = False)
counts = (PNSCount(df_points, df_nodes, df_segments))
counts_combined.append(counts)
names.append(img)
fileinfo = pd.read_csv(os.path.join('./par', 'lung_file_idx.csv'))
print(names)
img_group = []
img_treatment = []
for i in names:
img_group.append(fileinfo[fileinfo['data_filename'] == i]['genotype'].item())
img_treatment.append(fileinfo[fileinfo['data_filename'] == i]['treatment'].item())
if countfilename_combined not in imglist:
df_counts_combined = pd.DataFrame(counts_combined, columns= ['Points', 'Nodes', 'Segments'])
df_counts_combined['Names'] = names
df_counts_combined['Genotype'] = img_group
df_counts_combined['Treatment'] = img_treatment
df_counts_combined.to_csv(os.path.join(path, countfilename_combined), index = False)
#%% [markdown]
# ### Execution
# To run the code, please change `path` to the directory that hosts the raw data.
#%%
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipdir = 'csv'
opdir = 'csv'
ippath = os.path.join(path, ipdir)
oppath = os.path.join(path, opdir)
# make dir
DirCheck(oppath)
# convert files in batch
stats_calculator(ippath, oppath)
#%% [markdown]
# ## Part 3:
# Creating two sets of plots:
# 1. histogram of each dataset on length and thickness
# 2. average histogram on length and thickness (line plot with error bars)
#
# * Inputs: segments_s.csv
# * Outputs:
# 1. `histo/length/*.png`: frequency - length (µm)
# 2. `histo/thickness/*.png`: frequency - thickness (µm)
# 3. `histo_summary/length.png`: histogram in line plot style
# 4. `histo_summary/thickness.png`: histogram in line plot style
#
# `SegStats` extracts the euclidean coordinates and thickness of each point, then calculates
# the total length and average thickness.
#
#
# In the ouputs, the code renames "thickness" to "radius" to avoid confusion. Quotes from
# Amira User's Manual
# > As an estimate of the local thickness, the closest distance to the label
# boundary (boundary distance map) is stored at every point in the *Spatial Graph*.
# The attribute is named *thickness* and constitutes the *radius* of the circular cross-section
# of the filament at a given point of the centerline.
#%%
# import depandencies
import matplotlib.pyplot as plt
import matplotlib.style as style
style.use('default')
import scipy.stats as stats
from core.mkplot import GroupImg, FindRange, IndividualHisto
from core.mkplot import make_individul_plots, make_individul_plots_all, make_merged_plots
#%%
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipdir = 'csv'
opdir1 = 'plot'
opdir2 = 'histogram'
subfolder = ['histo', 'histo_summary']
ippath = os.path.join(path, ipdir)
oppath = os.path.join(path, opdir1, opdir2)
for i in subfolder:
oppath_sub = os.path.join(oppath, i)
DirCheck(oppath_sub)
#%%
# load fileinfo
fileinfo = pd.read_csv(os.path.join('./par', 'lung_file_idx.csv'))
columns = {
'length': {
'x_label': 'Length (µm)',
'file_label': 'length',
},
'thickness': {
'x_label': 'Radius (µm)',
'file_label': 'radius',
},
}
#%%
# plot individual histogram
make_individul_plots(ippath, oppath, fileinfo, columns)
#%%
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipfile = 'counts_combined.csv'
ippath_stat = os.path.join(path, ipfile)
ippath_csv = os.path.join(path, 'csv')
oppath_stat = os.path.join(path, 'plot', 'histogram', 'histo_summary')
filestat = pd.read_csv(ippath_stat)
filestat['Group'] = filestat['Genotype'] + ' & ' + filestat['Treatment']
display(filestat)
make_individul_plots_all(ippath_csv, oppath, fileinfo, filestat, columns)
#%% [markdown]
# Create plots with x-axis in different scales
#%%
# plot merged histogram in counts
make_merged_plots(ippath_csv, oppath, fileinfo, columns, frequency = False, x_max_factor = 0.07)
# plot merged histogram in frequency
make_merged_plots(ippath, oppath, fileinfo, columns, frequency = True, x_max_factor = 0.07)
#%%
# plot merged histogram in counts
make_merged_plots(ippath, oppath, fileinfo, columns, frequency = False, x_max_factor = 0.2)
# plot merged histogram in frequency
make_merged_plots(ippath, oppath, fileinfo, columns, frequency = True, x_max_factor = 0.2)
#%%
# plot merged histogram in counts
make_merged_plots(ippath, oppath, fileinfo, columns, frequency = False, x_max_factor = 1)
# plot merged histogram in frequency
make_merged_plots(ippath, oppath, fileinfo, columns, frequency = True, x_max_factor = 1)
#%% [markdown]
# ## Part 4
# ### 4-1: Plot paired scatter plot
# Module: seaborn
#%%
# from pandas.plotting import scatter_matrix
import seaborn as sns
sns.set(style="ticks", color_codes=True)
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipfile = 'counts_combined.csv'
ippath_stat = os.path.join(path, ipfile)
ippath_csv = os.path.join(path, 'csv')
oppath_stat = os.path.join(path, 'plot', 'histogram', 'histo_summary')
counts = pd.read_csv(ippath_stat)
counts['Group'] = counts['Genotype'] + ' & ' + counts['Treatment']
display(counts)
#%%
g = sns.PairGrid(counts, vars = ['Points', 'Nodes', 'Segments'], hue = 'Group')
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
g.add_legend()
g.savefig(os.path.join(oppath_stat, "pairgrid.png"))
#%% [markdown]
# ### 4-2: Plot histogram comparison
# Module: seaborn
#%%
labels, uniques = pd.factorize(counts['Group'])
for i in uniques:
print(i)
tmp_df = counts[counts['Group'] == i]
data_merge = []
filename = tmp_df['Names']
df_all = []
for index, value in filename.items():
print(value)
tmp_df2 = pd.read_csv(os.path.join(ippath_csv, value, 'segments_s.csv'))
tmp_df2['Filename'] = value
df_all.append(tmp_df2)
df_all_con = pd.concat(df_all)
# display(df_all_con)
g = sns.FacetGrid(df_all_con, col="Filename")
g.map(plt.hist, 'thickness', density = True)
#%% [markdown]
# ## Part 5: histogram standardization
#
#%%
from core.mkplot import histo_standardize, make_merged_plots_std
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipfile = 'counts_combined.csv'
ippath_stat = os.path.join(path, ipfile)
ippath_csv = os.path.join(path, 'csv')
histo_standardize(ippath_csv)
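#%% [markdown]
# `histo_standardize` is defined in `core.mkplot` and is not shown here. Judging from the
# "Standard Deviation" axis labels used in Part 6 and the `segments_s_std.csv` output it
# produces, it presumably z-scores each measurement per dataset. A minimal sketch of that
# assumption (not the actual implementation):
#%%
# Hypothetical z-score standardization; the real histo_standardize may differ.
def standardize_sketch(df, cols=('length', 'thickness')):
    out = df.copy()
    for c in cols:
        out[c] = (df[c] - df[c].mean()) / df[c].std()  # standard score per column
    return out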
#%% [markdown]
# ## Part 6: Plots with standardized data
#
#%%
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipfile = 'counts_combined.csv'
ippath_stat = os.path.join(path, ipfile)
ippath_csv = os.path.join(path, 'csv')
oppath = os.path.join(path, 'plot', 'histogram')
DirCheck(oppath)
columns = {
'length': {
'x_label': 'Standard Deviation',
'file_label': 'length',
},
'thickness': {
'x_label': 'Standard Deviation',
'file_label': 'radius',
},
}
make_merged_plots_std(ippath_csv, oppath, fileinfo, columns, opdir = 'histo_summary_std', filename = 'segments_s_std.csv', frequency = False, x_max_factor = 1)
make_merged_plots_std(ippath_csv, oppath, fileinfo, columns, opdir = 'histo_summary_std', filename = 'segments_s_std.csv', frequency = True, x_max_factor = 1)
make_merged_plots_std(ippath_csv, oppath, fileinfo, columns, opdir = 'histo_summary_std', filename = 'segments_s_std.csv', frequency = False, x_max_factor = 0.2)
make_merged_plots_std(ippath_csv, oppath, fileinfo, columns, opdir = 'histo_summary_std', filename = 'segments_s_std.csv', frequency = True, x_max_factor = 0.2)
#%% [markdown]
# # Part 7
# Plot the point, node, and segment counts in Bokeh with HoloViews
#%%
import numpy as np
import pandas as pd
import holoviews as hv
from holoviews import opts, Cycle
#%%
hv.extension('bokeh')
path = '/Volumes/LaCie_DataStorage/Woo-lungs/2019'
ipfile = 'counts_combined.csv'
ippath = os.path.join(path, ipfile)
counts = pd.read_csv(ippath)
f1 = hv.Scatter((zip(counts.Points.items(), counts.Nodes.items())), ['Points'], ['Nodes'])
f2 = hv.Scatter((zip(counts.Points.items(), counts.Segments.items())), ['Points'], ['Segments'])
f3 = hv.Scatter((zip(counts.Nodes.items(), counts.Points.items())), ['Nodes'], ['Points'])
f4 = hv.Scatter((zip(counts.Nodes.items(), counts.Segments.items())), ['Nodes'], ['Segments'])
f5 = hv.Scatter((zip(counts.Segments.items(), counts.Points.items())), ['Segments'], ['Points'])
f6 = hv.Scatter((zip(counts.Segments.items(), counts.Nodes.items())), ['Segments'], ['Nodes'])
f1 + f2 + f3 + f4 + f5 + f6
#%%
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
from holoviews.operation import gridmatrix
from bokeh.sampledata.iris import flowers
from bokeh.palettes import brewer
import bokeh.models as bmod
counts = | pd.read_csv(ippath) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.stats import rankdata
import torch
import numpy as np
import pandas as pd
from featurizer.functions.algebra_statistic import weighted_average, weighted_std, downside_std, upside_std
import pdb
# https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
def rolling_sum(tensor, window=1, dim=0):
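    # Windowed sum via the cumulative-sum trick: for i >= window, ret[i] = cumsum[i] - cumsum[i - window],
    # and ret[window - 1] = cumsum[window - 1] is already the first full window; earlier entries become NaN.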
ret = torch.cumsum(tensor, dim=dim)
ret[window:] = ret[window:] - ret[:-window]
ret[:window-1]= float("nan")
return ret
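# Example (illustrative): rolling_sum(torch.tensor([1., 2., 3., 4., 5.]), window=3)
# -> tensor([nan, nan, 6., 9., 12.])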
def rolling_sum_(tensor, window=1, dim=0):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).sum()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_sum3d(tensor, window=1, dim=1):
ret = torch.cumsum(tensor, dim=dim)
ret[:,window:] = ret[:,window:] - ret[:,:-window]
ret[:,:window-1]= float("nan")
return ret
def rolling_mean(tensor, window=1):
#to-do fixme
#ret = torch.cumsum(tensor, dim=0)
#ret[window:] = ret[window:] - ret[:-window]
#ret[:window-1]= float("nan")
#output = ret/window
return rolling_mean_(tensor=tensor, window=window)
def rolling_mean_(tensor, window=1):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = | pd.DataFrame(tensor_np) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.format(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
class TestCategoricalDtypeParametrized(object):
@pytest.mark.parametrize('categories', [
list('abcd'),
np.arange(1000),
['a', 'b', 10, 2, 1.3, True],
[True, False],
pd.date_range('2017', periods=4)])
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ['a', 'b']
c1 = CategoricalDtype(categories, ordered=True)
c2 = CategoricalDtype(categories, ordered=False)
c3 = CategoricalDtype(categories, ordered=None)
assert c1 is not c2
assert c1 is not c3
@pytest.mark.parametrize('ordered', [False, None])
def test_unordered_same(self, ordered):
c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
c2 = CategoricalDtype(['b', 'a'], ordered=ordered)
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(['a', 'b', 'c'])
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is None
def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize('v1, v2', [
([1, 2, 3], [1, 2, 3]),
([1, 2, 3], [3, 2, 1]),
])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1, ordered=False)
c2 = CategoricalDtype(v2, ordered=True)
c3 = CategoricalDtype(v1, ordered=None)
assert c1 is not c2
assert c1 is not c3
def test_nan_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, 1])
def test_same_categories_different_order(self):
c1 = CategoricalDtype(['a', 'b'], ordered=True)
c2 = CategoricalDtype(['b', 'a'], ordered=True)
assert c1 is not c2
@pytest.mark.parametrize('ordered1', [True, False, None])
@pytest.mark.parametrize('ordered2', [True, False, None])
def test_categorical_equality(self, ordered1, ordered2):
# same categories, same order
# any combination of None/False are equal
# True/True is the only combination with True that are equal
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('abc'), ordered2)
result = c1 == c2
expected = bool(ordered1) is bool(ordered2)
assert result is expected
# same categories, different order
# any combination of None/False are equal (order doesn't matter)
# any combination with True are not equal (different order of cats)
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('cab'), ordered2)
result = c1 == c2
expected = (bool(ordered1) is False) and (bool(ordered2) is False)
assert result is expected
# different categories
c2 = CategoricalDtype([1, 2, 3], ordered2)
assert c1 != c2
# none categories
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(None, ordered2)
c3 = CategoricalDtype(None, ordered1)
assert c1 == c2
assert c2 == c1
assert c2 == c3
@pytest.mark.parametrize('categories', [list('abc'), None])
@pytest.mark.parametrize('other', ['category', 'not a category'])
def test_categorical_equality_strings(self, categories, ordered, other):
c1 = CategoricalDtype(categories, ordered)
result = c1 == other
expected = other == 'category'
assert result is expected
def test_invalid_raises(self):
with pytest.raises(TypeError, match='ordered'):
CategoricalDtype(['a', 'b'], ordered='foo')
with pytest.raises(TypeError, match="'categories' must be list-like"):
CategoricalDtype('category')
def test_mixed(self):
a = CategoricalDtype(['a', 'b', 1, 2])
b = CategoricalDtype(['a', 'b', '1', '2'])
assert hash(a) != hash(b)
def test_from_categorical_dtype_identity(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# Identity test for no changes
c2 = CategoricalDtype._from_categorical_dtype(c1)
assert c2 is c1
def test_from_categorical_dtype_categories(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override categories
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[2, 3])
assert result == CategoricalDtype([2, 3], ordered=True)
def test_from_categorical_dtype_ordered(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(
c1, ordered=False)
assert result == CategoricalDtype([1, 2, 3], ordered=False)
def test_from_categorical_dtype_both(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[1, 2], ordered=False)
assert result == CategoricalDtype([1, 2], ordered=False)
def test_str_vs_repr(self, ordered):
c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
assert str(c1) == 'category'
# Py2 will have unicode prefixes
pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)"
assert re.match(pat.format(ordered=ordered), repr(c1))
def test_categorical_categories(self):
# GH17884
c1 = CategoricalDtype(Categorical(['a', 'b']))
tm.assert_index_equal(c1.categories, pd.Index(['a', 'b']))
c1 = CategoricalDtype(CategoricalIndex(['a', 'b']))
tm.assert_index_equal(c1.categories, | pd.Index(['a', 'b']) | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import _pickle as cPickle
import argparse
from copy import deepcopy
import japanize_matplotlib
import lightgbm as lgb
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
import os
code_path = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('seed', type=int)
arg('iteration_mul', type=float)
arg('train_file', type=str)
arg('test_file', type=str)
arg('--learning_rate', type=float, default=0.05)
arg('--num_leaves', type=int, default=31)
arg('--n_estimators', type=int, default=500)
args = parser.parse_args()  # e.g. args=['1', '0.5', 'train_fe.ftr', 'test_fe.ftr']
# print(args)
train_fe = pd.read_feather(f'{code_path}/../prepare_data/{args.train_file}')
test_fe = pd.read_feather(f'{code_path}/../prepare_data/{args.test_file}')
target_fe = train_fe['meter_reading']
train_fe = train_fe.drop('meter_reading', axis=1)
X_train = train_fe.query('20160115 <= timestamp < 20160601 & site_id != 0')
X_valid = train_fe.query('20160901 <= timestamp < 20170101 & site_id != 0')
X_test = test_fe
y_train = target_fe.loc[X_train.index]
y_valid = target_fe.loc[X_valid.index]
# y_train = np.log1p(y_train)
# y_valid = np.log1p(y_valid)
X_train = X_train.drop('timestamp', axis=1)
X_valid = X_valid.drop('timestamp', axis=1)
X_test = X_test.drop('timestamp', axis=1)
# print(X_train.shape)
def meter_predict(meter, model, X_test, best_iteration, iteration_mul=1.5):
X_test_m = X_test.query('meter == {}'.format(meter)).drop('meter', axis=1)
g = X_test_m.groupby('building_id')
y_pred = []
for building_id in tqdm(sorted(X_test_m['building_id'].unique())):
X_building = g.get_group(building_id)
y_pred.append(pd.Series(model.predict(X_building, n_jobs=4,num_iteration=min(models_all[meter].n_estimators, int(best_iteration[meter][building_id]*iteration_mul))), index=X_building.index))
return pd.concat(y_pred).sort_index()
# load model
load_name = '{}/../model/model_use_{}_seed{}_leave{}_lr{}_tree{}.pkl'.format(code_path, args.train_file.replace('.ftr', ''),args.seed, args.num_leaves, str(args.learning_rate).replace('.', ''), args.n_estimators)
with open(load_name, 'rb') as f:
models = pickle.load(f)
# with open(f'{code_path}/../model/model_5_95_hokan_cleaning_50000tree_seed{}.pkl'.format(args.seed), 'wb') as f:
# pickle.dump(models, f)
# Best iteration count for each building and meter
best_iteration = dict()
for meter in [0,1,2,3]:
best_iteration[meter] = dict()
# for i in range(1448):
# best_iteration[meter][i] = 200
for i in tqdm(sorted(X_valid.query('meter == {}'.format(meter))['building_id'].unique())):
best_iteration[meter][i] = max(20, np.argmin(np.array(models[meter].evals_result_[i]['rmse'])) + 1)
# best_iteration[meter][i] = np.argmin(np.array(models[meter].evals_result_[i]['rmse'])) + 1
del_list = [list(), list(), list(), list()]
for meter in [0,1,2,3]:
for buildingID, itr in best_iteration[meter].items():
if itr<=20:
del_list[meter].append(buildingID)
if itr<=100:
best_iteration[meter][buildingID] = 100
# if itr>=int(models[0].n_estimators * 0.98):
# best_iteration[meter][buildingID] = models[0].n_estimatorss
for meter in [0,1,2,3]:
for i in range(1448):
if i not in best_iteration[meter]:
best_iteration[meter][i] = 200
#load model
load_name = '{}/../model/model_all_use_{}_seed{}_leave{}_lr{}_tree{}.pkl'.format(code_path, args.train_file.replace('.ftr', ''),args.seed, args.num_leaves, str(args.learning_rate).replace('.', ''), args.n_estimators)
with open(load_name, 'rb') as f:
models_all = pickle.load(f)
# Predictions on the test set for each meter type
preds = list()
for i in tqdm([3,2,1,0]):
preds.append(meter_predict(i, models_all[i], X_test, best_iteration, iteration_mul=args.iteration_mul))
y_preds = pd.concat(preds).sort_index()
# lgb.plot_importance(models_all[0], importance_type='gain', figsize=(10,20))
# lgb.plot_importance(models_all[0], importance_type='split', figsize=(10,20))
submission = | pd.read_csv(f'{code_path}/../input/sample_submission.csv') | pandas.read_csv |
""" test get/set & misc """
from datetime import timedelta
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
MultiIndex,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
def test_basic_indexing():
s = Series(np.random.randn(5), index=["a", "b", "a", "a", "b"])
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s[5]
with pytest.raises(IndexError, match=msg):
s[5] = 0
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
s = s.sort_index()
with pytest.raises(IndexError, match=msg):
s[5]
msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"
with pytest.raises(IndexError, match=msg):
s[5] = 0
def test_basic_getitem_with_labels(datetime_series):
indices = datetime_series.index[[5, 10, 15]]
result = datetime_series[indices]
expected = datetime_series.reindex(indices)
tm.assert_series_equal(result, expected)
result = datetime_series[indices[0] : indices[2]]
expected = datetime_series.loc[indices[0] : indices[2]]
tm.assert_series_equal(result, expected)
def test_basic_getitem_dt64tz_values():
# GH12089
# with tz for values
ser = Series(
date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
)
expected = Timestamp("2011-01-01", tz="US/Eastern")
result = ser.loc["a"]
assert result == expected
result = ser.iloc[0]
assert result == expected
result = ser["a"]
assert result == expected
def test_getitem_setitem_ellipsis():
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
tm.assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
@pytest.mark.parametrize(
"result_1, duplicate_item, expected_1",
[
[
Series({1: 12, 2: [1, 2, 2, 3]}),
Series({1: 313}),
Series({1: 12}, dtype=object),
],
[
Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
Series({1: [1, 2, 3]}),
Series({1: [1, 2, 3]}),
],
],
)
def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
tm.assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_setitem_integers():
# caused bug without test
s = Series([1, 2, 3], ["a", "b", "c"])
assert s.iloc[0] == s["a"]
s.iloc[0] = 5
tm.assert_almost_equal(s["a"], 5)
def test_series_box_timestamp():
rng = date_range("20090415", "20090519", freq="B")
ser = Series(rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[1], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[3], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
ser = Series(rng, index=rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[rng[1]], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[rng[3]], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
def test_series_box_timedelta():
rng = timedelta_range("1 day 1 s", periods=5, freq="h")
ser = Series(rng)
assert isinstance(ser[0], Timedelta)
assert isinstance(ser.at[1], Timedelta)
assert isinstance(ser.iat[2], Timedelta)
assert isinstance(ser.loc[3], Timedelta)
assert isinstance(ser.iloc[4], Timedelta)
def test_getitem_ambiguous_keyerror(indexer_sl):
ser = Series(range(10), index=list(range(0, 20, 2)))
with pytest.raises(KeyError, match=r"^1$"):
indexer_sl(ser)[1]
def test_getitem_dups_with_missing(indexer_sl):
# breaks reindex, so need to use .loc internally
# GH 4246
ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"])
with pytest.raises(KeyError, match=re.escape("['bam'] not in index")):
indexer_sl(ser)[["foo", "bar", "bah", "bam"]]
def test_setitem_ambiguous_keyerror(indexer_sl):
s = Series(range(10), index=list(range(0, 20, 2)))
# equivalent of an append
s2 = s.copy()
indexer_sl(s2)[1] = 5
expected = s.append(Series([5], index=[1]))
tm.assert_series_equal(s2, expected)
def test_setitem(datetime_series, string_series):
datetime_series[datetime_series.index[5]] = np.NaN
datetime_series[[1, 2, 17]] = np.NaN
datetime_series[6] = np.NaN
assert np.isnan(datetime_series[6])
assert np.isnan(datetime_series[2])
datetime_series[np.isnan(datetime_series)] = 5
assert not np.isnan(datetime_series[2])
def test_setslice(datetime_series):
sl = datetime_series[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique is True
# FutureWarning from NumPy about [slice(None, 5).
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_basic_getitem_setitem_corner(datetime_series):
# invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
msg = "key of type tuple not found and not a MultiIndex"
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2]
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
with tm.assert_produces_warning(FutureWarning):
# GH#31299
result = datetime_series[[slice(None, 5)]]
expected = datetime_series[:5]
tm.assert_series_equal(result, expected)
# OK
msg = r"unhashable type(: 'slice')?"
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]]
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]] = 2
def test_slice(string_series, object_series):
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
objSlice = object_series[10:20]
assert string_series.index[9] not in numSlice.index
assert object_series.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == string_series.index[11]
assert tm.equalContents(numSliceEnd, np.array(string_series)[-10:])
# Test return view.
sl = string_series[10:20]
sl[:] = 0
assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
# GH 8209
s = Series([], dtype=object)
s.loc["B"] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta("1 days"), index=["B"]))
s = s.reindex(s.index.insert(0, "A"))
tm.assert_series_equal(s, Series([np.nan, Timedelta("1 days")], index=["A", "B"]))
s.loc["A"] = timedelta(1)
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion():
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
return_value = df.set_index(["a", "b", "c"], inplace=True)
assert return_value is None
s = Series([1], index=[(2, 2, 2)])
df["val"] = 0
df
df["val"].update(s)
expected = DataFrame(
{"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
)
return_value = expected.set_index(["a", "b", "c"], inplace=True)
assert return_value is None
tm.assert_frame_equal(df, expected)
def test_preserve_refs(datetime_series):
seq = datetime_series[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(datetime_series[10])
def test_cast_on_putmask():
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype="int64")
s[[True, False]] = Series([0], index=[1], dtype="int64")
expected = Series([0, 2], index=[1, 2], dtype="int64")
| tm.assert_series_equal(s, expected) | pandas._testing.assert_series_equal |
# coding: utf-8
# # Guild Wars 2 Achievement System Analysis
# Guild Wars 2 is a massively multiplayer online role-playing game created by ArenaNet. It tends to cater to more casual players and focuses on cooperative play, alongside a single-player campaign.
#
# This project analyzes the game's achievement system in order to determine which achievements players should pursue according to their play style and time commitment to the game. This was done by looking at the rewards of each achievement and determining their importance based on their monetary value and on the strength and rarity of the items received.
#
# Using supervised learning and clustering methods on the rewards given for completing achievements, I predict the financial worth of each achievement in game. Specifically, I used linear regression and decision tree regression to create a model which predicts the importance of an achievement based on the vendor value of the reward players receive for completing it. I also used random forests to classify those achievements into categories, so it is possible to predict the type of achievements which players should pursue. A minimal sketch of this modelling setup is included right after the imports below.
# In[1]:
import sklearn as sk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from scipy import io
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection, metrics, linear_model, datasets, feature_selection
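# A minimal sketch, not the notebook's actual pipeline: the two regressors named
# in the introduction, fitted on whatever numeric feature matrix X and
# vendor-value target y are prepared in the cells further below.
# In[ ]:
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
def sketch_reward_models(X, y):
    """Fit a linear regression and a decision tree regressor; return R^2 test scores."""
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    lin = LinearRegression().fit(X_tr, y_tr)
    tree = DecisionTreeRegressor(max_depth=5, random_state=0).fit(X_tr, y_tr)
    return lin.score(X_te, y_te), tree.score(X_te, y_te)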
# # Cleaning The Data
#
# To get the information I used the Guild Wars 2 API (found here: https://wiki.guildwars2.com/wiki/API:Main)
# For the analysis I used the following data:
#
# Achievement Category: the category an achievement belongs to, which indicates the type of achievement and was used to cluster the data
#
# Achievement Points: the number of points players receive from each achievement; collecting those points allows players to unlock better rewards in the future
#
# Reward Type: the type of reward the player gets for completing the achievement
#
# Item Type: the type of item the player gets as a reward
#
# Rarity: the rarity of the item the player gets
#
# Level of Reward: the level of the reward (80 is the max level in the game)
#
# Vendor Value: the sell value (in coins) of the reward item
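# The cells below load pre-cleaned CSV exports, but the raw data came from the
# official API linked above. A rough, self-contained sketch of that step is shown
# here for context; it is not the notebook's original download code, and the exact
# endpoints and response fields should be checked against the wiki.
# In[ ]:
import requests
def fetch_achievements(ids):
    """Fetch achievement details for the given ids from the GW2 API (v2)."""
    url = "https://api.guildwars2.com/v2/achievements?ids=" + ",".join(str(i) for i in ids)
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    return pd.DataFrame(resp.json())
# Example (uncomment to run): achievements_raw = fetch_achievements(range(1, 6))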
# In[2]:
#import CSV
Achievements = pd.read_csv("clean Data/Achievement.csv")
Items = pd.read_csv("clean Data/ItemFin.csv")
Titles = | pd.read_csv("clean Data/titles.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import sys
import tensorflow as tf
import json
import joblib
import time
from tensorflow import keras
from keras import optimizers
from datetime import datetime,timedelta
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
pd.set_option('display.max_columns', None)
#---------------------------------------
# variables
#---------------------------------------
start = time.time()
DATASET_NUM = 7
MODEL_NUM = 10
# path
PATH_BASE = './'
PATH_MODEL = PATH_BASE + '/model/'
PATH_RESULT = PATH_BASE + '/result/'
# power capacity
power_nm_list = ['onm1_h','onm2_h','onm3_h','onm4_h']
capacity_list = [89.7, 96.6, 90, 46.2]
RSRS_ID = 0
POWER_NM = power_nm_list[RSRS_ID]
CAPACITY = capacity_list[RSRS_ID]
print("POWER_NM:{}, CAPACITY:{}".format(POWER_NM,CAPACITY))
# timesteps
SHIFT_DAYS = 7
PRED_STEPS = 24
dataX_STEPS = SHIFT_DAYS*PRED_STEPS
#---------------------------------------
# functions
#---------------------------------------
# replace outliers with NaN
def power_anomal(x) :
if x > CAPACITY :
return np.nan
return x
def sensor_anomal(x) :
if x < -900 :
return np.nan
return x
# load solar power (onm) data
def load_power(POWER_NM):
df_power = pd.read_csv(PATH_BASE + '/df_power.csv',index_col=0)
df_power['POWER']=df_power['POWER'].apply(power_anomal).apply(lambda x:x)
df_power.sort_values(by=['DATE'], axis=0)
df_power = df_power.set_index(pd.DatetimeIndex(df_power['DATE']))
df_power.drop(['_id','DATE'], axis=1, inplace=True)
df_power = df_power.interpolate(method='linear',limit_direction='forward')
return df_power
# load sensor
def load_sensor():
df_sensor= pd.read_csv(PATH_BASE + '/df_sensor.csv',index_col=0)
df_sensor.sort_values(by=['DATE'], axis=0)
df_sensor = df_sensor.set_index(pd.DatetimeIndex(df_sensor['DATE']))
df_sensor.drop(['_id','DATE'], axis=1, inplace=True)
df_sensor['uv']=df_sensor['uv'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['solarradiation']=df_sensor['solarradiation'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['humidity']=df_sensor['humidity'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['windspeed']=df_sensor['windspeed'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['windgust']=df_sensor['windgust'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['temp']=df_sensor['temp'].apply(sensor_anomal).apply(lambda x:x)
df_sensor['winddir']=df_sensor['winddir'].apply(sensor_anomal).apply(lambda x:x)
df_sensor = df_sensor.interpolate(method='linear',limit_direction='forward')
return df_sensor
def get_df(df_power, df_sensor, POWER_NM):
# load the scaler
power_scaler = joblib.load(open('{}scaler/power_{}.pkl'.format(PATH_MODEL,POWER_NM[:-2]), 'rb'))
weather_scaler = joblib.load(open('{}scaler/weather.pkl'.format(PATH_MODEL), 'rb'))
# power
scaledpower = power_scaler.fit_transform(df_power.values)
scaledpower_df = pd.DataFrame(scaledpower, columns=df_power.columns, index=list(df_power.index.values))
# weather
df_weather = df_sensor.copy()
df_weather.drop(['dailyrainin','weeklyrainin','monthlyrainin','yearlyrainin'], axis=1, inplace=True)
scaledweather = weather_scaler.fit_transform(df_weather.values)
scaledweather_df = pd.DataFrame(scaledweather, columns=df_weather.columns, index=list(df_weather.index.values))
# JOIN (index merge)
df = pd.merge(scaledpower_df,scaledweather_df, how='outer',left_index=True, right_index=True)
df = df[[ 'POWER', 'solarradiation', 'humidity', 'windspeed', 'windgust', 'temp', 'winddir' ]]
df = df.interpolate(method='linear')
return power_scaler, df
#---------------------------------------
# MODEL_TYPE iteration
#---------------------------------------
total_accRate = 0
total_accRate_list = []
result_pred = pd.DataFrame()
result_acc = pd.DataFrame()
result_target= pd.DataFrame()
for m in range(0,MODEL_NUM):
# for m in range(0,1):
model = tf.keras.models.load_model(PATH_MODEL+'model'+str(m)+'.h5')
print("\n\n MODEL", m, "-"*100)
accRate_sum = 0
#---------------------------------------
# dataset iteration
#---------------------------------------
for T in range(0,DATASET_NUM):
PRED_DAY = datetime(2021, 8, 25, 0,0,0)+timedelta(T)
PRED_DAY = datetime(PRED_DAY.year, PRED_DAY.month, PRED_DAY.day, 0,0,0)
X_START = PRED_DAY - timedelta(7)
X_END = PRED_DAY - timedelta(1)
X_END = datetime(X_END.year, X_END.month, X_END.day, 23,0,0)
# print("X DATA: {} ~ {} => PRED: {} ".format(str(X_START)[:10], str(X_END)[:10], str(PRED_DAY)[:10]))
# get data
df_power = load_power(POWER_NM)
df_sensor = load_sensor()
power_scaler, df = get_df(df_power, df_sensor,POWER_NM)
# create x,y arr
x_arr = []
X_df = df.loc[str(X_START):str(X_END)]
x_arr.append(X_df.iloc[:].values.tolist())
x_arr=np.asarray(x_arr).astype(np.float64)
y_arr = []
Y_df = df.loc[str(PRED_DAY):str(PRED_DAY + timedelta(1))]
y_arr.append(Y_df.iloc[:,[0]].values.tolist())
y_arr=np.asarray(y_arr).astype(np.float64)
#---------------------------------------
# predict
#---------------------------------------
n_dataset= x_arr.shape[0]
predList=[]
accRate=[]
yList=[]
pred = model.predict([x_arr])
pred[pred<0] = 0
pred = pred[:,:,0]
pred = power_scaler.inverse_transform(pred)
predList = pred.reshape(-1,1)
#---------------------------------------
# calculate prediction accuracy rate
#---------------------------------------
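        # hourly accuracy = 100 - |target - prediction| / CAPACITY * 100, i.e. 100 minus the absolute error as a percentage of installed capacity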
if(str(PRED_DAY.strftime("%Y-%m-%d")) > str(df.index[-1])[:10]):
for hr in range(0, PRED_STEPS):
accRate.append(0)
else:
y = power_scaler.inverse_transform(y_arr[:,:,0])
yList = y.reshape(-1,1)
for hr in range(0, PRED_STEPS):
pred = predList[hr]
target = yList[hr]
difference = np.abs(target-pred)
accRate.append(100-np.round(difference/CAPACITY*100, 2))
accRate_df = pd.DataFrame(np.array(accRate).reshape(1,-1))
accRate_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
accRate_df.insert(0,'MODEL',m, allow_duplicates=False)
pred_df = pd.DataFrame(np.array(predList).reshape(1,-1))
pred_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
pred_df.insert(0,'MODEL',m, allow_duplicates=False)
y_df = pd.DataFrame(np.array(yList).reshape(1,-1))
y_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
y_df.insert(0,'MODEL',m, allow_duplicates=False)
mean_accRate = np.round(accRate_df.mean(axis = 1,numeric_only = True)[0],2)
accRate_sum = accRate_sum + mean_accRate
print("dataset {} : {}".format(T+1,mean_accRate))
if result_pred.shape[0] == 0:
result_pred = pred_df
result_acc = accRate_df
result_target= y_df
else:
result_pred = pd.concat([result_pred, pred_df])
result_acc = | pd.concat([result_acc, accRate_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 16:01:32 2018
@author: Adam
"""
import os
import glob
from numbers import Number
import numpy as np
import pandas as pd
def sub_dire(base, dire, fname=None):
""" Build path to a base/dire. Create if does not exist."""
if base is None:
raise ValueError(f"base={base} is not valid")
else:
path = os.path.join(base, dire)
if not os.path.exists(path):
os.makedirs(path)
if fname is not None:
path = os.path.join(path, fname)
return path
def ls(dire, regex="*", full_output=True, report=False):
""" List the contents of dire.
e.g., to list pickle files in the cache,
ls(h5.cache_dire, regex="*.pkl")
"""
# folder
if dire is None:
raise Exception("Cannot read from None directory.")
# check exists
if not os.path.isdir(dire):
raise Exception(f"{dire} does not exist.")
fils = glob.glob(os.path.join(dire, regex))
if report:
print(f"Found {len(fils)} matches to {regex} in {dire}.")
if full_output:
return fils
fnames = [os.path.split(f)[1] for f in fils]
return fnames
def to_pickle(obj, dire, fname, overwrite=False, **kwargs):
""" save `obj` as [dire]/[fname].pkl
args:
obj python object to save
dire directory
fname file name
overwrite=False
kwargs:
[passed to pandas.to_pickle()]
"""
fname, _ = os.path.splitext(fname)
fname += ".pkl"
fil = os.path.join(dire, fname)
# checks
if not os.path.isdir(dire):
raise OSError(f"{dire} not found")
elif os.path.exists(fil) and not overwrite:
raise OSError(f"{fil} already exists. Use overwrite=True")
else:
pd.to_pickle(obj, fil, **kwargs)
def read_pickle(dire, fname, **kwargs):
""" read `obj` from [dire]/[fname].pkl
args:
obj python object to save
dire directory
fname file name.
kwargs:
[passed to pandas.read_pickle()]
"""
fname, _ = os.path.splitext(fname)
fname += ".pkl"
fil = os.path.join(dire, fname)
# checks
if not os.path.exists(fil):
raise OSError(f"{fil} not found")
else:
pd.read_pickle(fil, **kwargs)
def t_index(time, dt=1.0, t0=0.0):
""" Convert time to index using dt [and t0].
"""
if isinstance(time, Number):
return int(round((time - t0) / dt))
elif isinstance(time, tuple):
return tuple([int(round((t - t0) / dt)) for t in time])
elif isinstance(time, list):
return list([int(round((t - t0) / dt)) for t in time])
elif isinstance(time, np.ndarray):
return np.array([int(round((t - t0) / dt)) for t in time])
else:
raise TypeError("time must be a number or list of numbers.")
def utf8_attrs(info):
""" Convert bytes to utf8
args:
info dict()
return:
info dict() (decoded to utf8)
"""
for key, val in info.items():
if isinstance(val, bytes):
info[key] = val.decode("utf8")
return info
def add_level(df, label, position="first"):
""" Add a level to pd.MultiIndex columns.
This can be useful when joining DataFrames with / without multiindex
columns.
>>> st = statistics(df, groupby="squid") # MultiIndex DataFrame
>>> add_level(h5.var, "VAR").join(st)
args:
df object to add index level to pd.DataFrame()
label= value(s) of the added level(s) str() / list(str)
position=0
position of level to add "first", "last" or int
return:
df.copy() with pd.MultiIndex()
"""
df2 = df.copy()
# multiple labels?
if isinstance(label, str):
# ..nope...
label = [label]
# reverse the label list (more intuitve behaviour?)
label = label[::-1]
# position is first?
if position == "first":
position = 0
# if df is Series then convert to DataFrame
if isinstance(df2, pd.Series):
df2 = | pd.DataFrame(df2) | pandas.DataFrame |
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
            # tz info from start and end to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
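    # Illustrative usage sketch (not part of the original source; argument
    # values are assumptions): pd.date_range is the public entry point for
    # this classmethod, and exactly three of start/end/periods/freq may be
    # given, e.g.
    #   >>> pd.date_range(start="2021-01-01", periods=3, freq="D", tz="UTC")
    #   DatetimeIndex(['2021-01-01 00:00:00+00:00', '2021-01-02 00:00:00+00:00',
    #                  '2021-01-03 00:00:00+00:00'],
    #                 dtype='datetime64[ns, UTC]', freq='D')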
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
        # values that are not datetime-like carry no tzinfo, so they cannot
        # share a timezone with self
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
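    # Example (assumed data): subtracting two equally long, tz-compatible
    # indexes goes through this path and yields timedelta64[ns] values, e.g.
    #   >>> pd.DatetimeIndex(["2020-01-02"]) - pd.DatetimeIndex(["2020-01-01"])
    #   TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq=None)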
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases, when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date")
def isocalendar(self) -> DataFrame:
"""
Returns a DataFrame with the year, week, and day calculated according to
the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
with columns year, week and day
See Also
--------
        Timestamp.isocalendar : Return a 3-tuple containing ISO year,
            week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasnans:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
@property
def weekofyear(self):
"""
The week ordinal of the year.
.. deprecated:: 1.1.0
weekofyear and week have been deprecated.
Please use DatetimeIndex.isocalendar().week instead.
"""
warnings.warn(
"weekofyear and week have been deprecated, please use "
"DatetimeIndex.isocalendar().week instead, which returns "
"a Series. To exactly reproduce the behavior of week and "
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
)
week_series = self.isocalendar().week
if week_series.hasnans:
return week_series.to_numpy(dtype="float64", na_value=np.nan)
return week_series.to_numpy(dtype="int64")
week = weekofyear
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int64
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int64
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int64
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int64
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int64
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int64
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int64
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int64
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
        is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> np.ndarray:
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
+ self.microsecond / 3600 / 10 ** 6
+ self.nanosecond / 3600 / 10 ** 9
)
/ 24
)
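    # Worked example (input assumed for illustration): the J2000.0 epoch,
    # 2000-01-01 12:00, corresponds to Julian Date 2451545.0, so
    #   >>> pd.DatetimeIndex(["2000-01-01 12:00"]).to_julian_date()
    #   Float64Index([2451545.0], dtype='float64')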
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
tda = TimedeltaArray(self._ndarray.view("i8"))
return tda.std(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna
)
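    # Example (values assumed): translation invariance means the two calls
    # below agree, since the datetimes are just shifted timedeltas:
    #   pd.DatetimeIndex(["2020-01-01", "2020-01-03"]).std()
    #   pd.TimedeltaIndex(["0 days", "2 days"]).std()
    # both return the same Timedelta (about 1.41 days with the default ddof=1).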
# -------------------------------------------------------------------
# Constructor Helpers
@overload
def sequence_to_datetimes(
data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
) -> DatetimeArray:
...
@overload
def sequence_to_datetimes(
data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
) -> np.ndarray | DatetimeArray:
...
def sequence_to_datetimes(
data, allow_object: bool = False, require_iso8601: bool = False
) -> np.ndarray | DatetimeArray:
"""
Parse/convert the passed data to either DatetimeArray or np.ndarray[object].
"""
result, tz, freq = sequence_to_dt64ns(
data,
allow_object=allow_object,
allow_mixed=True,
require_iso8601=require_iso8601,
)
if result.dtype == object:
return result
dtype = tz_to_dtype(tz)
dta = DatetimeArray._simple_new(result, freq=freq, dtype=dtype)
return dta
def sequence_to_dt64ns(
data,
dtype=None,
copy=False,
tz=None,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
*,
allow_object: bool = False,
allow_mixed: bool = False,
require_iso8601: bool = False,
):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
allow_object : bool, default False
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
require_iso8601 : bool, default False
Only consider ISO-8601 formats when parsing strings.
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
    TypeError : PeriodDtype data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
tz = timezones.maybe_get_tz(tz)
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCMultiIndex):
raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
else:
data = extract_array(data, extract_numpy=True)
if isinstance(data, IntegerArray):
data = data.to_numpy("int64", na_value=iNaT)
elif not isinstance(data, (np.ndarray, ExtensionArray)):
# GH#24539 e.g. xarray, dask object
data = np.asarray(data)
if isinstance(data, DatetimeArray):
inferred_freq = data.freq
# By this point we are assured to have either a numpy array or Index
data, copy = maybe_convert_dtype(data, copy)
data_dtype = getattr(data, "dtype", None)
if (
is_object_dtype(data_dtype)
or is_string_dtype(data_dtype)
or is_sparse(data_dtype)
):
# TODO: We do not have tests specific to string-dtypes,
# also complex or categorical or other extension
copy = False
if lib.infer_dtype(data, skipna=False) == "integer":
data = data.astype(np.int64)
else:
# data comes back here as either i8 to denote UTC timestamps
# or M8[ns] to denote wall times
data, inferred_tz = objects_to_datetime64ns(
data,
dayfirst=dayfirst,
yearfirst=yearfirst,
allow_object=allow_object,
allow_mixed=allow_mixed,
require_iso8601=require_iso8601,
)
if tz and inferred_tz:
# two timezones: convert to intended from base UTC repr
data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
data = data.view(DT64NS_DTYPE)
elif inferred_tz:
tz = inferred_tz
elif allow_object and data.dtype == object:
# We encountered mixed-timezones.
return data, None, None
data_dtype = data.dtype
# `data` may have originally been a Categorical[datetime64[ns, tz]],
# so we need to handle these types.
if is_datetime64tz_dtype(data_dtype):
# DatetimeArray -> ndarray
tz = _maybe_infer_tz(tz, data.tz)
result = data._ndarray
elif is_datetime64_dtype(data_dtype):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_ndarray", data)
if data.dtype != DT64NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
copy = False
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = tzconversion.tz_localize_to_utc(
data.view("i8"), tz, ambiguous=ambiguous
)
data = data.view(DT64NS_DTYPE)
assert data.dtype == DT64NS_DTYPE, data.dtype
result = data
else:
# must be integer dtype otherwise
        # assume the data are epoch timestamps
if tz:
tz = timezones.maybe_get_tz(tz)
if data.dtype != INT64_DTYPE:
data = data.astype(np.int64, copy=False)
result = data.view(DT64NS_DTYPE)
if copy:
result = result.copy()
assert isinstance(result, np.ndarray), type(result)
assert result.dtype == "M8[ns]", result.dtype
# We have to call this again after possibly inferring a tz above
validate_tz_from_dtype(dtype, tz)
return result, tz, inferred_freq
def objects_to_datetime64ns(
data: np.ndarray,
dayfirst,
yearfirst,
utc=False,
errors="raise",
require_iso8601: bool = False,
allow_object: bool = False,
allow_mixed: bool = False,
):
"""
Convert data to array of timestamps.
Parameters
----------
data : np.ndarray[object]
dayfirst : bool
yearfirst : bool
utc : bool, default False
Whether to convert timezone-aware timestamps to UTC.
errors : {'raise', 'ignore', 'coerce'}
require_iso8601 : bool, default False
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
Returns
-------
result : ndarray
np.int64 dtype if returned values represent UTC timestamps
np.datetime64[ns] if returned values represent wall times
object if mixed timezones
inferred_tz : tzinfo or None
Raises
------
ValueError : if data cannot be converted to datetimes
"""
assert errors in ["raise", "ignore", "coerce"]
# if str-dtype, convert
data = np.array(data, copy=False, dtype=np.object_)
flags = data.flags
order: Literal["F", "C"] = "F" if flags.f_contiguous else "C"
try:
result, tz_parsed = tslib.array_to_datetime(
data.ravel("K"),
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601,
allow_mixed=allow_mixed,
)
result = result.reshape(data.shape, order=order)
except ValueError as err:
try:
values, tz_parsed = conversion.datetime_to_datetime64(data.ravel("K"))
# If tzaware, these values represent unix timestamps, so we
# return them as i8 to distinguish from wall times
values = values.reshape(data.shape, order=order)
return values.view("i8"), tz_parsed
except (ValueError, TypeError):
raise err
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
# Return i8 values to denote unix timestamps
return result.view("i8"), tz_parsed
elif is_datetime64_dtype(result):
# returning M8[ns] denotes wall-times; since tz is None
# the distinction is a thin one
return result, tz_parsed
elif is_object_dtype(result):
# GH#23675 when called via `pd.to_datetime`, returning an object-dtype
# array is allowed. When called via `pd.DatetimeIndex`, we can
# only accept datetime64 dtype, so raise TypeError if object-dtype
# is returned, as that indicates the values can be recognized as
# datetimes but they have conflicting timezones/awareness
if allow_object:
return result, tz_parsed
raise TypeError(result)
else: # pragma: no cover
# GH#23675 this TypeError should never be hit, whereas the TypeError
# in the object-dtype branch above is reachable.
raise TypeError(result)
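# Example of the return conventions above (inputs assumed): tz-naive strings
# come back as wall times with dtype M8[ns] and tz_parsed=None, whereas
# strings that all carry the same UTC offset come back as int64 unix
# timestamps together with the parsed timezone.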
def maybe_convert_dtype(data, copy: bool):
"""
Convert data based on dtype conventions, issuing deprecation warnings
or errors where appropriate.
Parameters
----------
data : np.ndarray or pd.Index
copy : bool
Returns
-------
data : np.ndarray or pd.Index
copy : bool
Raises
------
    TypeError : PeriodDtype data is passed
"""
if not hasattr(data, "dtype"):
# e.g. collections.deque
return data, copy
if is_float_dtype(data.dtype):
# Note: we must cast to datetime64[ns] here in order to treat these
# as wall-times instead of UTC timestamps.
data = data.astype(DT64NS_DTYPE)
copy = False
# TODO: deprecate this behavior to instead treat symmetrically
# with integer dtypes. See discussion in GH#23675
elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):
# GH#29794 enforcing deprecation introduced in GH#23539
raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
elif is_period_dtype(data.dtype):
# Note: without explicitly raising here, PeriodIndex
# test_setops.test_join_does_not_recur fails
raise TypeError(
"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
)
elif is_categorical_dtype(data.dtype):
# GH#18664 preserve tz in going DTI->Categorical->DTI
# TODO: cases where we need to do another pass through this func,
# e.g. the categories are timedelta64s
data = data.categories.take(data.codes, fill_value=NaT)._values
copy = False
    elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype):
        # Other ExtensionArray types: fall back to object dtype so the
        # datetime parsing path can handle the values element-wise
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy
import pytesseract
from pytesseract import Output
import cv2
import jiwer
import numpy as np
import pandas as pd
import base64
class Class_Pytesseract_OCR:
def __init__(self, hyperparams,model_parameters,return_formats):
#---------dataset_infos
self.X = None
self.y_target = None
#---------model_parameters
self.ocr_engine = model_parameters['ocr_engine']
self.segmentation_mode = model_parameters['segmentation_mode']
self.language = model_parameters['language']
self.custom_config = self._create_custom_config_string()
#---------hyperparams
self.bbox_conf = hyperparams['bbox_conf']
#---------return_formats
self.bbox_return = return_formats['bbox_return']
self.image_return_format = return_formats['image_return_format']
self.remove_linebreaks = return_formats['remove_linebreaks']
#------- Results
self.y_pred = None
self.avg_mer = None
self.avg_wer = None
self.avg_wil = None
self.avg_wip = None
self.df_result = None
def _create_custom_config_string(self):
segmentaion_mode_dict = {"Orientation and script detection (OSD) only.":"0",
"Automatic page segmentation with OSD.":"1",
"Automatic page segmentation, but no OSD, or OCR.":"2",
"Fully automatic page segmentation, but no OSD. (Default)":"3",
"Assume a single column of text of variable sizes.":"4",
"Assume a single uniform block of vertically aligned text.":"5",
"Assume a single uniform block of text.":"6",
"Treat the image as a single text line.":"7",
"Treat the image as a single word.":"8",
"Treat the image as a single word in a circle.":"9",
"Treat the image as a single character.":"10",
"Sparse text. Find as much text as possible in no particular order.":"11",
"Sparse text with OSD.":"12",
"Raw line. Treat the image as a single text line, bypassing hacks that are Tesseract-specific.":"13"}
ocr_engine_dict = {"Legacy engine only.":"0",
"Neural nets LSTM engine only.":"1",
"Legacy + LSTM engines.":"2",
"Default, based on what is available.":"3"}
custom_config = (f"-l {self.language} --oem {ocr_engine_dict[self.ocr_engine]} --psm {segmentaion_mode_dict[self.segmentation_mode]}")
custom_config = r'{}'.format(custom_config)
return custom_config
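    # Example (assumed parameter choices): with language "eng", OCR engine
    # "Neural nets LSTM engine only." and segmentation mode "Assume a single
    # uniform block of text.", this method returns the pytesseract config
    # string "-l eng --oem 1 --psm 6".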
def _calc_metrics(self,ground_truth,hypothesis):
transformation = jiwer.Compose([
jiwer.ToLowerCase(),
jiwer.RemoveMultipleSpaces(),
jiwer.RemoveWhiteSpace(replace_by_space=" "),
jiwer.SentencesToListOfWords(word_delimiter=" ")
])
mer = jiwer.mer(
ground_truth,
hypothesis,
truth_transform=transformation,
hypothesis_transform=transformation
)
wer = jiwer.wer(
ground_truth,
hypothesis,
truth_transform=transformation,
hypothesis_transform=transformation
)
wil = jiwer.wil(
ground_truth,
hypothesis,
truth_transform=transformation,
hypothesis_transform=transformation
)
wip = jiwer.wip(
ground_truth,
hypothesis,
truth_transform=transformation,
hypothesis_transform=transformation
)
return mer,wer,wil,wip
def _get_bounding_box(self,d):
bboxes_list = []
n_boxes = len(d['text'])
for i in range(n_boxes):
if int(d['conf'][i]) > self.bbox_conf:
(x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
bboxes_list.append((x, y, w, h))
return bboxes_list
def _get_metrics(self):
mer_list,wer_list,wil_list,wip_list = [],[],[],[]
for yt,yp in zip(self.y_target,self.y_pred):
mer,wer,wil,wip = self._calc_metrics(yt,yp)
mer_list.append(mer)
wer_list.append(wer)
wil_list.append(wil)
wip_list.append(wip)
return mer_list,wer_list,wil_list,wip_list, np.mean(mer_list),np.mean(wer_list),np.mean(wil_list),np.mean(wip_list)
def _remove_linebreaks_from_text(self,text):
if self.remove_linebreaks:
text = text.replace("\n"," ")
text = text.replace("\t"," ")
return text
def predict(self,img_reference,step,return_formats = None):
if step == "Experiment":
img = cv2.imread(img_reference)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if step == "Deployment":
img = img_reference
d = pytesseract.image_to_data(img, config=self.custom_config,output_type=Output.DICT)
text = pytesseract.image_to_string(img, config=self.custom_config)
text = self._remove_linebreaks_from_text(text)
if return_formats:
bbox_list = self._get_bounding_box(d)
if return_formats['bbox_return'] == "np_array":
result = bbox_list, text
if return_formats['bbox_return'] == "image":
img_bytes_base64 = self._overlay_image_with_bboxes(img,bbox_list,return_formats['image_return_format'])
result = img_bytes_base64, text
else:
result = d, text
return result
def _overlay_image_with_bboxes(self,img,bbox_list,image_type):
for i in bbox_list:
img = cv2.rectangle(img, (i[0], i[1]), (i[0] + i[2], i[1] + i[3]), (0, 255, 0), 2)
_, buffer = cv2.imencode(image_type,img)
img_bytes_base64 = base64.b64encode(buffer).decode()
return img_bytes_base64
def _construct_result_dataframe(self,step):
all_bboxes_list = []
self.y_pred = []
for image_path in self.X:
d,text = self.predict(image_path,step)
text = self._remove_linebreaks_from_text(text)
self.y_pred.append(text)
bboxes_list = self._get_bounding_box(d)
all_bboxes_list.append(bboxes_list)
if step == 'Experiment':
mer_list,wer_list,wil_list,wip_list,self.avg_mer,self.avg_wer,self.avg_wil,self.avg_wip = self._get_metrics()
            self.df_result = pd.DataFrame({'source_text': self.X, 'target_ocr_text': self.y_target, 'predicted_ocr_text': self.y_pred, 'BBOXES_COORDS(X, Y, W, H)': all_bboxes_list, 'Match Error Rate (MER)': mer_list, 'Word Error Rate (WER)': wer_list, 'Word Information Lost (WIL)': wil_list, 'Word Information Preserved (WIP)': wip_list})
if step == 'Deployment':
            self.df_result = pd.DataFrame({'source_text': self.X, 'predicted_ocr_text': self.y_pred, 'BBOXES_COORDS(X, Y, W, H)': all_bboxes_list})
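# --- Illustrative usage sketch (not part of the original class) ---
# The parameter values and the image path below are assumptions chosen only
# to show the call pattern; any of the option strings handled by
# _create_custom_config_string can be substituted.
if __name__ == "__main__":
    ocr = Class_Pytesseract_OCR(
        hyperparams={"bbox_conf": 60},
        model_parameters={
            "ocr_engine": "Default, based on what is available.",
            "segmentation_mode": "Fully automatic page segmentation, but no OSD. (Default)",
            "language": "eng",
        },
        return_formats={
            "bbox_return": "np_array",
            "image_return_format": ".jpg",
            "remove_linebreaks": True,
        },
    )
    # step="Experiment" makes predict() read the (hypothetical) file from disk
    # with OpenCV before running Tesseract; the raw Tesseract data dict and
    # the recognized text are returned.
    data_dict, text = ocr.predict("sample_page.png", step="Experiment")
    print(text)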
# @name: create_fake_data.py
# @summary: Creates a series of fake patients and "data" simulating CViSB data
# @description: For prototyping and testing out CViSB data management website, creating a series of fake patients and data files.
# @sources:
# @depends:
# @author: <NAME>
# @email: <EMAIL>
# @license: Apache-2.0
# @date: 16 March 2018
import pandas as pd
import numpy as np
import io
import dropbox
# [ Set up params ] ---------------------------------------------------------------------------------------
token = ""
dropbox_folder = "/CViSB_test"
expt_file = 'expt_list.csv'
# [ Set up fake patient generator ] -----------------------------------------------------------------------
def fakePatients(number = 25):
ids = np.arange(number)
patients = pd.DataFrame()
create_id = np.vectorize(lambda x: 'fakeid' + str(x).zfill(4))
patients['patient_id'] = create_id(ids)
patients['sex'] = patients.apply(create_sex, axis = 1)
patients['age'] = patients.apply(create_age, axis = 1)
patients['cohort'] = patients.apply(create_cohort, axis = 1)
patients['cohort_exposure'] = patients.apply(create_exposure, axis = 1)
patients['timepoints'] = patients.apply(create_timepts, axis = 1)
return patients
def create_sex(x):
if (np.random.rand() > 0.5):
return('male')
else:
return('female')
def create_age(x):
return round(np.random.rand()*100)
def create_cohort(x):
if (np.random.rand() > 0.67):
return('Ebola')
else:
return('Lassa')
def create_exposure(x):
rand_num = np.random.rand()
if (rand_num > 0.8):
return("exposed")
elif (rand_num > 0.1):
if (np.random.rand() > 0.2):
return("died")
else:
return("survived")
else:
return("community")
def create_timepts(x):
rand_num = np.random.rand()
timepts = [
[0, 1],
[0, 1, 2],
[0, 1, 2, 3],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4, 7],
[0, 1, 2, 3, 4, 7, 10]
]
if (rand_num < 0.4):
return(timepts[0])
elif (rand_num < 0.6):
return(timepts[1])
    elif (rand_num < 0.7):
        return(timepts[2])
    elif (rand_num < 0.8):
        return(timepts[3])
    elif (rand_num < 0.9):
        return(timepts[4])
else:
return(timepts[5])
# [ Create patients ] -------------------------------------------------------------------------------------
patients = fakePatients()
# --- Upload to dropbox ---
dbx = dropbox.Dropbox(token)
dbx.files_upload(patients.to_csv(index = False).encode('utf-8'), dropbox_folder + '/fakepatient_roster.csv')
# dbx.files_upload(patients.to_csv(sep = '\t', index = False).encode('utf-8'), '/fakepatient_roster.tsv')
# [ Create samples ] --------------------------------------------------------------------------------------
# Convert array of timepoints to wide dataframe of timepoints
# TODO: use the function I wrote
tp = pd.DataFrame(patients['timepoints'].values.tolist(), index = patients.patient_id).reset_index()
# Convert to long dataframe
tp = pd.melt(tp, id_vars = ['patient_id'], value_name = 'timepoint').drop('variable', axis = 1)
# Remove NAs
tp = tp.dropna(axis = 0, how='any')
# Create a sample for every timepoint
sample_list = pd.DataFrame(data = {'sample_id': ['plasma', 'PMBC', 'hDNA', 'vDNA', 'hRNA', 'vRNA'], 'description': ['raw blood plasma', 'raw peripheral blood mononuclear cells', 'extracted host DNA', 'extracted viral DNA', 'extracted host RNA', 'extracted viral RNA']})
sample_list['tmp'] = 1
tp['tmp'] = 1
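# 'tmp' is a constant join key: merging on it below produces the Cartesian product
# of timepoint rows and sample types, i.e. one sample row per (patient, timepoint, type).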
# Merge on drop timepoint 0; no biological data taken
samples = pd.merge(tp[tp.timepoint > 0], sample_list, on='tmp').drop('tmp', axis = 1)
# Fill some fields to be inputted later.
samples['creation_date'] = np.NaN
samples['storage_loc'] = np.NaN
samples['invalid'] = False
dbx.files_upload(samples.to_csv(index = False).encode('utf-8'), dropbox_folder + '/fakesample_list.csv', mode=dropbox.files.WriteMode.overwrite)
# [ Generate file list ] ----------------------------------------------------------------------------------
tp = tp.drop('tmp', axis = 1)
def gen_filelist(patient_timepts):
# read in the experiment file structure
md, res = dbx.files_download(dropbox_folder +"/" + expt_file)
if(res.status_code == 200):
expts = pd.read_csv(io.StringIO(res.content.decode('utf-8')))
expts
def array2long(df, var, id_var):
# remove any NAs in column
df = df[df[var].notnull()]
if(any(df[var].apply(lambda x: type(x)) == str)):
# Convert string to array
df[var] = df[var].apply(lambda x: x.replace(', ', ',').split(','))
# splay data frame wide
temp = pd.DataFrame(df[var].values.tolist(), index = df[id_var]).reset_index()
# Convert to long dataframe
temp = pd.melt(temp, id_vars = [id_var], value_name = var).drop('variable', axis = 1)
# Remove NAs
temp = temp.dropna(axis = 0, how='any')
return temp
# BUG: fix the .loc for NAN setting
ex_times = array2long(expts, 'timepts', 'expt_id')
ex_files = array2long(expts, 'file_types', 'expt_id')
ex_pars = array2long(expts, 'params', 'expt_id')
# Merge on drop timepoint 0; no biological data taken
expt_files = pd.merge(ex_times, ex_files, on='expt_id')
    expt_files = pd.merge(expt_files, ex_pars, on='expt_id', how='outer')
import os
import re
import unicodedata
from twitter import OAuth, Twitter
import numpy as np
import pandas as pd
import arrow
from . import templates, plots
from loonathetrends.utils import get_video_title_lookup, get_video_ismv_lookup
auth = OAuth(
os.environ["TWITTER_ACCESSTOKEN"],
os.environ["TWITTER_ACCESSSECRET"],
os.environ["TWITTER_CONSUMERKEY"],
os.environ["TWITTER_CONSUMERSECRET"],
)
t = Twitter(auth=auth)
t_upload = Twitter(domain="upload.twitter.com", auth=auth)
MILESTONES = {
100_000: "100k",
200_000: "200k",
500_000: "500k",
1_000_000: "1M",
2_000_000: "2M",
5_000_000: "5M",
10_000_000: "10M",
20_000_000: "20M",
50_000_000: "50M",
100_000_000: "100M",
}
REGEX_YOUTUBEURL = r"(?:.+?)?(?:\/v\/|watch\/|\?v=|\&v=|youtu\.be\/|\/v=|^youtu\.be\/)([a-zA-Z0-9_-]{11})+"
def _status_length(status):
return len(unicodedata.normalize("NFC", status))
def followers_update(db, freq, dry_run=False, post_plots=False):
if freq == "daily":
ndays = 1
elif freq == "weekly":
ndays = 7
else:
raise RuntimeError("Parameter freq provided not valid")
query = (
"SELECT * FROM followers "
"WHERE tstamp = current_date "
"OR tstamp >= current_date - %s "
"ORDER BY tstamp"
)
template = templates.followers_update
df = pd.read_sql(query, db, params=(ndays,))
date = arrow.get(df["tstamp"].iloc[-1]).format("YYMMDD")
grouped = df.groupby("site")
tots = grouped.last()["count"].to_dict()
difs = (grouped.last()["count"] - grouped.first()["count"]).to_dict()
status = template(freq=freq, date=date, tots=tots, difs=difs)
if _status_length(status) > 280:
raise RuntimeError(f"The status update is {_status_length(status)} characters long.")
if post_plots:
media = plots.new_followers(db)
else:
media = []
if not dry_run:
media_ids = []
for img in media:
media_id = t_upload.media.upload(media=img)["media_id_string"]
media_ids.append(media_id)
if media_ids:
def chunk_four(l):
for i in range(0, len(l), 4):
yield l[i : i + 4]
last_tweet = None
for chunk in chunk_four(media_ids):
                if last_tweet is None:
last_tweet = t.statuses.update(
status=status, media_ids=",".join(chunk)
)
else:
last_tweetid = last_tweet["id_str"]
last_tweet = t.statuses.update(
status="@loonathetrends",
media_ids=",".join(chunk),
in_reply_to_status_id=last_tweetid,
)
else:
t.statuses.update(status=status)
else:
for n, img in enumerate(media, 1):
with open("test{}.png".format(n), "wb") as f:
f.write(img)
return status
def youtube_update(db, kind, dry_run=False):
# create DataFrame for stats
stats = pd.read_sql(
"SELECT * FROM video_stats WHERE "
"tstamp >= (current_date - 8)"
"ORDER BY tstamp",
db,
parse_dates=["tstamp"],
).set_index("tstamp")
lookup = get_video_title_lookup(db)
# find out what video to post about
func = lambda x: x.diff().last("7d").sum()
if kind == "latest":
mvlookup = pd.Series(get_video_ismv_lookup(db))
videoid = (
pd.read_sql(
"SELECT published_at, video_id FROM videos ORDER BY published_at", db
)
.set_index("video_id")
.loc[mvlookup]
.index[-1]
)
elif kind == "views":
videoid = stats.groupby("video_id")["views"].agg(func).idxmax()
elif kind == "likes":
videoid = stats.groupby("video_id")["likes"].agg(func).idxmax()
elif kind == "comments":
videoid = stats.groupby("video_id")["comments"].agg(func).idxmax()
# get and trim stats
stats = stats[stats.video_id == videoid].drop("video_id", axis=1)
last = stats.index[-1]
length = pd.Timedelta("1d")
trimmed = stats.reindex(pd.date_range(last - length, last, freq="h"))
# assign fill-ins for template
title = lookup[videoid]
tots = trimmed.iloc[-1].to_dict()
rates = (trimmed.diff().mean() * 24).to_dict()
date = arrow.get(last).format("YYMMDD")
# make charts
media = plots.youtube(db, videoid)
# fill template
kind_template = {
"latest": "Latest @loonatheworld music video:",
"views": "Most viewed @loonatheworld video this week:",
"likes": "Most liked @loonatheworld video this week:",
"comments": "Most commented @loonatheworld video this week:",
}
template = templates.youtube_update
status = template(
kind=kind_template[kind],
title=title,
date=date,
tots=tots,
rates=rates,
videoid=videoid,
)
if _status_length(status) > 280:
raise RuntimeError(f"The status update is {_status_length(status)} characters long.")
# post on twitter
if not dry_run:
media_id = t_upload.media.upload(media=media)["media_id_string"]
t.statuses.update(status=status, media_ids=media_id)
return status
def youtube_milestone(db, dry_run=False):
# get the stats
stats = pd.read_sql(
"SELECT * FROM video_stats WHERE "
"tstamp >= (current_date - 8) "
"ORDER BY tstamp",
db,
parse_dates=["tstamp"],
)
mvlookup = pd.Series(get_video_ismv_lookup(db))
lookup = get_video_title_lookup(db)
# get the current view counts for all videos
views = stats.groupby("video_id")["views"].last()
# calculate how many views are left for each milestone for all videos
marray = np.array(list(MILESTONES)).reshape((1, len(MILESTONES)))
varray = np.array(views).reshape((len(views), 1))
viewsleft = pd.DataFrame(
columns=MILESTONES, data=marray - varray, index=views.index
)
viewsleft[viewsleft <= 0] = None # discard reached milestones
# calculate the current view rate for all videos
pivot = stats.pivot(index="tstamp", columns="video_id", values="views")
rates = pivot.last("7d1h").asfreq("h").diff().mean() * 24
rates[rates <= 0] = None
# calculate how many days are left to reach the milestones
daysleft = viewsleft.apply(lambda x: x / rates).min(axis=1)
# find out the featured video for today
daysleft_mv = daysleft[(daysleft <= 7) & mvlookup]
videoid = daysleft.idxmin() if daysleft_mv.empty else daysleft_mv.idxmin()
milestone = viewsleft.loc[videoid].idxmin()
# create the mapping for the tweet template
fillin = {
"date": arrow.now().format("YYMMDD"),
"videoid": videoid,
"title": lookup[videoid],
"diff": viewsleft.loc[videoid].min(),
"milestone": MILESTONES[milestone],
"prediction": (arrow.now().shift(days=daysleft.loc[videoid]).humanize()),
}
# fill in the template and post on Twitter
template = templates.youtube_milestone
status = template(**fillin)
if _status_length(status) > 280:
raise RuntimeError(f"The status update is {_status_length(status)} characters long.")
if not dry_run:
t.statuses.update(status=status)
return status
def youtube_milestone_reached(db, dry_run=False):
videos = pd.read_sql(
"select video_id, title, age(published_at) as age from videos", db
).values
for video_id, title, age in videos:
        if age < pd.Timedelta("3d"):
import pandas as pd
def extract_feature_values(data):
""" Given a params dict, return the values for feeding into a model"""
# Replace these features with the features for your model. They need to
# correspond with the `name` attributes of the <input> tags
EXPECTED_FEATURES = [
"user_id",
]
# This assumes all inputs will be numeric. If you have categorical features
# that the user enters as a string, you'll want to rewrite this as a for
# loop that treats different features differently
    values = [[float(data[feature]) for feature in EXPECTED_FEATURES]]
    return pd.DataFrame(values, columns=EXPECTED_FEATURES)
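# Illustrative usage (hypothetical form payload; 'user_id' is the only expected field here):
# features_df = extract_feature_values({"user_id": "42"})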
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-03-04 19:03:49
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-03-28 10:26:48
import lightgbm as lgb
import numpy as np
import pandas as pd
import warnings
import threading
import time
from datetime import datetime
from numba import jit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from util.util import *
warnings.filterwarnings('ignore')
data_path = 'concrete/data/'
model_path = 'concrete/model/'
pickle_path = 'concrete/pickle/'
prediction_path = 'concrete/prediction/'
v = '2'
# t = '_total'
t = ''
class Concrete(object):
"""
data minie for concrete
"""
def __init__(self, do_pre=False):
self.id2file = {}
self.id2lab = {}
self.detail_map = {}
self.detail_pickle = {}
self.f1_max_index = 0.5
self.f1_map = {index: 0 for index in range(0, 5)}
self.version = datetime.now().strftime("%m%d%H%M")
self.seed = 333
self.EARLY_STOP = 300
self.OPT_ROUNDS = 2444
self.MAX_ROUNDS = 300000
self.evaluate_num = 0
self.params = {
'boosting': 'gbdt',
'objective': 'binary',
'learning_rate': 0.01,
'max_depth': -1,
'min_child_samples': 20,
'max_bin': 255,
'subsample': 0.85,
'subsample_freq': 10,
'colsample_bytree': 0.8,
'min_child_weight': 0.001,
'subsample_for_bin': 200000,
'min_split_gain': 0,
'reg_alpha': 0,
'reg_lambda': 0,
'num_leaves': 63,
'seed': self.seed,
'nthread': 20,
'metric': "None",
"verbose": -1
}
self.pre_data_list(do_pre)
def load_basic(self, file_type):
"""
load basic
@param file_type: 1-submit_example, 0-train_labels
"""
file_name = 'submit_example' if file_type else 'train_labels'
file_name += '.csv'
with open(data_path + file_name, 'r') as f:
train_list = f.readlines()[1:]
self.id2file = {
index: train[:-1].split(',')[0] for index, train in enumerate(train_list)}
self.id2lab = {index: int(train[:-1].split(',')[1])
for index, train in enumerate(train_list)}
def load_detail(self, file_type, block_size=500):
"""
load detail
@param file_type: 1-submit_example, 0-train_labels
"""
pickle_file = 'submit_middle' if file_type else 'train_middle'
pickle_file += '.pickle'
detail_pickle = load_bigger(pickle_path + pickle_file)
print('load over')
id_len = len(self.id2lab.keys())
for block_index in range((id_len - 1) // block_size + 1):
index_min = block_size * block_index
index_max = min(id_len, (block_index + 1) * block_size)
threadings = []
for index in list(self.id2file.keys())[index_min:index_max]:
label_id = self.id2lab[index]
detail_csv = detail_pickle[index]
work = threading.Thread(
target=self.pre_data_once, args=(index, file_type, label_id, detail_csv,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
if not index_max % 10:
print(index_max)
detail_map = [self.detail_map[k]
for k in sorted(self.detail_map.keys())]
output_file = 'submit_middle' if file_type else 'train_middle'
title_basic = ['活塞工作时长', '发动机转速', '油泵转速', '泵送压力', '液压油温', '流量档位',
'分配压力', '排量电流', '低压开关', '高压开关', '搅拌超压信号', '正泵', '反泵', '设备类型']
# title_min = [index + '_min'for index in title_basic[1:8]]
# title_max = [index + '_max'for index in title_basic[1:8]]
# title_mean = [index + '_mean'for index in title_basic[1:8]]
# title_std = [index + '_std'for index in title_basic[1:8]]
# title_poor = [index + '_poor'for index in title_basic[1:8]]
# title_median = [index + '_median'for index in title_basic[1:8]]
# title_total = [index + '_total'for index in title_basic[1:8]]
# title_hit = [index + '_hit'for index in title_basic[1:8]]
# title_constant = ['label', '活塞工作时长', '低压开关', '正泵', '设备类型', '低压开关&正泵']
# title_collection = [*title_min, *title_mean, *title_max, *title_poor, *title_std, *title_median, *title_total, *title_hit]
# title_collection_diff = [index + '_diff' for index in title_collection]
# title_collection_diff_diff = [
# index + '_diff_diff' for index in title_collection]
# title_collection_diff_diff_diff = [
# index + '_diff_diff_diff' for index in title_collection]
# title_collection_diff_diff_diff2 = [
# index + '_diff_diff_diff2' for index in title_collection]
# title_collection_diff_diff_diff3 = [
# index + '_diff_diff_diff3' for index in title_collection]
# title_collection_ptr = [index + '_pct' for index in title_collection]
# title_collection_ptr_diff = [
# index + '_pct_diff' for index in title_collection]
# title_all = [*title_constant, *title_collection, *title_collection_diff,
# *title_collection_diff_diff, *title_collection_diff_diff_diff,
# *title_collection_ptr, *title_collection_ptr_diff,
# *title_collection_diff_diff_diff2, *title_collection_diff_diff_diff3]
# title_all = [*title_collection_diff_diff_diff2, *title_collection_diff_diff_diff3]
title_skew = [index + '_skew'for index in title_basic[0:8]]
with open(data_path + output_file, 'w') as f:
f.write(",".join(title_skew) + '\n')
# f.write("nunique" + '\n')
f.write("\n".join([str(index) for index in detail_map]))
def load_all(self, file_type):
"""
load all
"""
self.load_basic(file_type)
self.load_detail(file_type)
self.detail_map = {}
def load_all_pickle(self, file_type):
"""
load all
"""
self.load_basic(file_type)
self.load_detail_pickle(file_type)
self.detail_pickle = {}
def load_detail_pickle(self, file_type, block_size=300):
"""
load detail
@param file_type: 1-submit_example, 0-train_labels
"""
id_len = len(self.id2lab.keys())
for block_index in range((id_len - 1) // block_size + 1):
index_min = block_size * block_index
index_max = min(id_len, (block_index + 1) * block_size)
threadings = []
for index in list(self.id2file.keys())[index_min:index_max]:
file_id = self.id2file[index]
work = threading.Thread(
target=self.pre_data_two, args=(index, file_type, file_id,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
print(index_max)
output_file = 'submit_middle' if file_type else 'train_middle'
output_file += '.pickle'
dump_bigger(self.detail_pickle, pickle_path + output_file)
def pre_data_list(self, do_pre):
version = begin_time()
df_columns = pd.read_csv(data_path + 'train_middle_total').columns
begin_index = 0
with open(model_path + v + 'columns.csv', 'r') as f:
str_f = f.readline()
if str_f[-1] == '\n':
str_f = str_f[:-1]
good_columns = str_f.split(',')
with open(model_path + v + 'lastcolumn.csv', 'r') as f:
str_f = f.readline()
if str_f[-1] == '\n':
str_f = str_f[:-1]
while df_columns[begin_index] != str_f:
begin_index += 1
self.wait_columns = list(df_columns[begin_index + 6:])
self.good_columns = good_columns
self.basic_f1 = 0
if do_pre == True:
self.load_all(0)
self.load_all(1)
elif do_pre == 2:
self.load_all_pickle(0)
self.load_all_pickle(1)
else:
self.load_basic(1)
end_time(version)
def evaluate_f1(self, preds, train_data):
self.evaluate_num = self.evaluate_num + 1
labels = train_data.get_label()
if not self.evaluate_num % 50:
f1_list = [self.evaulate_model_once(labels, [int(indexs > (index / 100))
for indexs in preds]) for index in range(48, 53)]
max_index = f1_list.index(max(f1_list))
if max_index in self.f1_map:
self.f1_map[max_index] += 1
else:
self.f1_map[max_index] = 1
# print(labels, preds)
return 'f1', f1_list[2], True
else:
preds = [int(index > 0.5) for index in preds]
return 'f1', self.evaulate_model_once(labels, preds), True
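    # lightgbm custom eval contract: feval must return (metric_name, value, is_higher_better).
    # It runs on every boosting round, so the evaluate_num counter above only performs the
    # more expensive 0.48-0.52 threshold sweep every 50th call and otherwise uses a fixed 0.5 cutoff.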
def pre_data_two(self, detail_id, file_type, file_id):
file_folder = 'data_test' if file_type else 'data_train'
file_folder += '/'
file_folder += file_id
detail_csv = pd.read_csv(data_path + file_folder)
self.detail_pickle[detail_id] = detail_csv
def pre_data_once(self, detail_id, file_type, label_id, detail_csv):
# detail_basic = detail_csv.agg(['min', 'max', 'std', 'mean', 'median'])
# detail_max = detail_basic.iloc[1]
# detail_time = detail_csv.max()[0]
# detail_time = detail_max[0]
# detail_press = detail_max[8]
# detail_pump = detail_max[11]
# detail_type = detail_max[13]
# detail_add = detail_pump + detail_press
# detail_constant = [label_id, detail_time,
# detail_press, detail_pump, detail_type, detail_add]
# detail_max = detail_max[1:8]
# detail_min = detail_basic.iloc[0, 1:8]
# detail_poor = detail_max - detail_min
# detail_mean = detail_basic.iloc[3, 1:8]
# detail_std = detail_basic.iloc[2, 1:8]
# detail_median = detail_basic.iloc[4, 1:8]
# detail_total = [index * detail_time for index in detail_mean]
# detail_hit = [index * detail_time for index in detail_std]
# detail_collapse = [*detail_min, *detail_mean, *detail_max, *detail_poor,
# *detail_std, *detail_median, *detail_total, *detail_hit]
# del detail_csv['设备类型']
# detail_basic_diff = detail_csv.diff()
# detail_diff_basic = detail_basic_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_min = detail_diff_basic.iloc[0, 1:8]
# detail_diff_max = detail_diff_basic.iloc[1, 1:8]
# detail_diff_poor = detail_diff_max - detail_diff_min
# detail_diff_std = detail_diff_basic.iloc[2, 1:8]
# detail_diff_mean = detail_diff_basic.iloc[3, 1:8]
# detail_diff_median = detail_diff_basic.iloc[4, 1:8]
# detail_diff_total = [index * detail_time for index in detail_diff_mean]
# detail_diff_hit = [index * detail_time for index in detail_diff_std]
# detail_collapse_diff = [*detail_diff_min, *detail_diff_mean, *detail_diff_max, *detail_diff_poor,
# *detail_diff_std, *detail_diff_median, *detail_diff_total, *detail_diff_hit]
# detail_basic_diff_diff = detail_basic_diff.diff()
# detail_diff_diff_basic = detail_basic_diff_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_min = detail_diff_diff_basic.iloc[0, 1:8]
# detail_diff_diff_max = detail_diff_diff_basic.iloc[1, 1:8]
# detail_diff_diff_poor = detail_diff_diff_max - detail_diff_diff_min
# detail_diff_diff_std = detail_diff_diff_basic.iloc[2, 1:8]
# detail_diff_diff_mean = detail_diff_diff_basic.iloc[3, 1:8]
# detail_diff_diff_median = detail_diff_diff_basic.iloc[4, 1:8]
# detail_diff_diff_total = [
# index * detail_time for index in detail_diff_diff_mean]
# detail_diff_diff_hit = [
# index * detail_time for index in detail_diff_diff_mean]
# detail_collapse_diff_diff = [*detail_diff_diff_min, *detail_diff_diff_mean, *detail_diff_diff_max,
# *detail_diff_diff_poor, *detail_diff_diff_std, *detail_diff_diff_median,
# *detail_diff_diff_total, *detail_diff_diff_hit]
# detail_basic_diff_diff_diff = detail_basic_diff_diff.diff()
# detail_diff_diff_diff_basic = detail_basic_diff_diff_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff_min = detail_diff_diff_diff_basic.iloc[0, 1:8]
# detail_diff_diff_diff_max = detail_diff_diff_diff_basic.iloc[1, 1:8]
# detail_diff_diff_diff_poor = detail_diff_diff_diff_max - detail_diff_diff_diff_min
# detail_diff_diff_diff_std = detail_diff_diff_diff_basic.iloc[2, 1:8]
# detail_diff_diff_diff_mean = detail_diff_diff_diff_basic.iloc[3, 1:8]
# detail_diff_diff_diff_median = detail_diff_diff_diff_basic.iloc[4, 1:8]
# detail_diff_diff_diff_total = [
# index * detail_time for index in detail_diff_diff_diff_mean]
# detail_diff_diff_diff_hit = [
# index * detail_time for index in detail_diff_diff_diff_mean]
# detail_collapse_diff_diff_diff = [*detail_diff_diff_diff_min, *detail_diff_diff_diff_mean, *detail_diff_diff_diff_max,
# *detail_diff_diff_diff_poor, *detail_diff_diff_diff_std, *detail_diff_diff_diff_median,
# *detail_diff_diff_diff_total, *detail_diff_diff_diff_hit]
# detail_basic_diff_diff_diff2 = detail_basic_diff_diff_diff.diff()
# detail_diff_diff_diff2_basic = detail_basic_diff_diff_diff2.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff2_min = detail_diff_diff_diff2_basic.iloc[0, 1:8]
# detail_diff_diff_diff2_max = detail_diff_diff_diff2_basic.iloc[1, 1:8]
# detail_diff_diff_diff2_poor = detail_diff_diff_diff2_max - detail_diff_diff_diff2_min
# detail_diff_diff_diff2_std = detail_diff_diff_diff2_basic.iloc[2, 1:8]
# detail_diff_diff_diff2_mean = detail_diff_diff_diff2_basic.iloc[3, 1:8]
# detail_diff_diff_diff2_median = detail_diff_diff_diff2_basic.iloc[4, 1:8]
# detail_diff_diff_diff2_total = [
# index * detail_time for index in detail_diff_diff_diff2_mean]
# detail_diff_diff_diff2_hit = [
# index * detail_time for index in detail_diff_diff_diff2_mean]
# detail_collapse_diff_diff2_diff = [*detail_diff_diff_diff2_min, *detail_diff_diff_diff2_mean, *detail_diff_diff_diff2_max,
# *detail_diff_diff_diff2_poor, *detail_diff_diff_diff2_std, *detail_diff_diff_diff2_median,
# *detail_diff_diff_diff2_total, *detail_diff_diff_diff2_hit]
# detail_basic_diff_diff_diff3 = detail_basic_diff_diff_diff2.diff()
# detail_diff_diff_diff3_basic = detail_basic_diff_diff_diff3.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff3_min = detail_diff_diff_diff3_basic.iloc[0, 1:8]
# detail_diff_diff_diff3_max = detail_diff_diff_diff3_basic.iloc[1, 1:8]
# detail_diff_diff_diff3_poor = detail_diff_diff_diff3_max - detail_diff_diff_diff3_min
# detail_diff_diff_diff3_std = detail_diff_diff_diff3_basic.iloc[2, 1:8]
# detail_diff_diff_diff3_mean = detail_diff_diff_diff3_basic.iloc[3, 1:8]
# detail_diff_diff_diff3_median = detail_diff_diff_diff3_basic.iloc[4, 1:8]
# detail_diff_diff_diff3_total = [
# index * detail_time for index in detail_diff_diff_diff3_mean]
# detail_diff_diff_diff3_hit = [
# index * detail_time for index in detail_diff_diff_diff3_mean]
# detail_collapse_diff_diff3_diff = [*detail_diff_diff_diff3_min, *detail_diff_diff_diff3_mean, *detail_diff_diff_diff3_max,
# *detail_diff_diff_diff3_poor, *detail_diff_diff_diff3_std, *detail_diff_diff_diff3_median,
# *detail_diff_diff_diff3_total, *detail_diff_diff_diff3_hit]
# detail_basic_pct = detail_csv.pct_change()
# detail_pct_change_basic = detail_basic_pct.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_pct_change_min = detail_pct_change_basic.iloc[0, 1:8]
# detail_pct_change_max = detail_pct_change_basic.iloc[1, 1:8]
# detail_pct_change_poor = detail_pct_change_max - detail_pct_change_min
# detail_pct_change_std = detail_pct_change_basic.iloc[2, 1:8]
# detail_pct_change_mean = detail_pct_change_basic.iloc[3, 1:8]
# detail_pct_change_median = detail_pct_change_basic.iloc[4, 1:8]
# detail_pct_change_total = [
# index * detail_time for index in detail_pct_change_mean]
# detail_pct_change_hit = [
# index * detail_time for index in detail_pct_change_std]
# detail_collapse_ptr = [*detail_pct_change_min, *detail_pct_change_mean, *detail_pct_change_max,
# *detail_pct_change_poor, *detail_pct_change_std, *detail_pct_change_median,
# *detail_pct_change_total, *detail_pct_change_hit]
# detail_basic_pct_diff = detail_basic_pct.diff()
# detail_pct_diff_basic = detail_basic_pct_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_pct_diff_min = detail_pct_diff_basic.iloc[0, 1:8]
# detail_pct_diff_max = detail_pct_diff_basic.iloc[1, 1:8]
# detail_pct_diff_poor = detail_pct_diff_max - detail_pct_diff_min
# detail_pct_diff_std = detail_pct_diff_basic.iloc[2, 1:8]
# detail_pct_diff_mean = detail_pct_diff_basic.iloc[3, 1:8]
# detail_pct_diff_median = detail_pct_diff_basic.iloc[4, 1:8]
# detail_pct_diff_total = [
# index * detail_time for index in detail_pct_diff_mean]
# detail_pct_diff_hit = [
# index * detail_time for index in detail_pct_diff_std]
# detail_collapse_pct_diff = [*detail_pct_diff_min, *detail_pct_diff_mean, *detail_pct_diff_max,
# *detail_pct_diff_poor, *detail_pct_diff_std, *detail_pct_diff_median,
# *detail_pct_diff_total, *detail_pct_diff_hit]
# detail = [*detail_constant, *detail_collapse, *detail_collapse_diff,
# *detail_collapse_diff_diff, *detail_collapse_diff_diff_diff,
# *detail_collapse_ptr, *detail_collapse_pct_diff,
# *detail_collapse_diff_diff2_diff, *detail_collapse_diff_diff3_diff]
# detail = [*detail_collapse_diff_diff2_diff, *detail_collapse_diff_diff3_diff]
self.detail_map[detail_id] = ",".join(
[str(index) for index in list(detail_csv.skew()[0:8])])
# self.detail_map[detail_id] = ",".join([str(index) for index in detail])
# self.detail_map[detail_id] = detail_csv['活塞工作时长'].nunique()
def pre_data(self, pre, slices):
"""
prepare data
"""
# detail_type = pd.get_dummies(pre['设备类型'], prefix=pre[['设备类型']].columns[0])
# pre = pre.drop(['设备类型'], axis=1)
# return pd.concat([pre, detail_type], axis=1)
pre['设备类型'] = pre['设备类型'].map(
{'ZV252': 0, 'ZV573': 1, 'ZV63d': 2, 'ZVa78': 3, 'ZVa9c': 4, 'ZVe44': 4, 'ZVfd4': 5})
if slices is None:
return pre
else:
columns_total = pre.columns
if not slices:
wait_columns = [*columns_total[:128], *columns_total[188:198]]
if slices == 11:
wait_columns = [*columns_total[:128]]
elif slices == 1:
wait_columns = [*columns_total[:128], *columns_total[178:198]]
elif slices == 2:
wait_columns = [*columns_total[:128], *
columns_total[178:198], *columns_total[218:228]]
elif slices < 9:
wait_columns = [*columns_total[:128], *columns_total[178:198], *
columns_total[218:228], *columns_total[88 + slices * 10:98 + slices * 10]]
else:
wait_columns = [*columns_total[:128], *columns_total[178:198], *
columns_total[218:228], *columns_total[108 + slices * 10:118 + slices * 10]]
# columns = [*columns_total[:118], *columns_total[178:188], *columns_total[118 + slices * 10:128 + slices * 10]]
# columns = columns_total[:118] + [:118 + 10 * slices]
# wait_columns = self.good_columns
# if slices != -1:
# wait_columns = [*wait_columns, self.wait_columns[slices]]
        wait = pd.DataFrame(pre, columns=wait_columns)
import json
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 30)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1200)
import matplotlib.pyplot as plt
import seaborn as sns # used for plot interactive graph.
import warnings
warnings.filterwarnings('ignore')
def load_tmdb_movies(path):
df = pd.read_csv(path)
df['release_date'] = pd.to_datetime(df['release_date']).apply(lambda x: x.date())
json_columns = ['genres', 'keywords', 'production_countries', 'production_companies', 'spoken_languages']
for column in json_columns:
df[column] = df[column].apply(json.loads)
return df
def load_tmdb_credits(path):
df = pd.read_csv(path)
json_columns = ['cast', 'crew']
for column in json_columns:
df[column] = df[column].apply(json.loads)
return df
# return a missing value rather than an error upon indexing/key failure
def safe_access(container, index_values):
result = container
try:
for idx in index_values:
result = result[idx]
return result
    except (IndexError, KeyError):
        return np.nan
def get_director(crew_data):
directors = [x['name'] for x in crew_data if x['job'] == 'Director']
return safe_access(directors, [0])
def pipe_flatten_names(keywords):
return '|'.join([x['name'] for x in keywords])
# TODO: либо переделать это здесь, либо заменить этим мёрдж данных в CBF
def convert_to_original_format(movies, credits):
tmdb_movies = movies.copy()
tmdb_movies['title_year'] = pd.to_datetime(tmdb_movies['release_date']).apply(lambda x: x.year)
tmdb_movies['country'] = tmdb_movies['production_countries'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['language'] = tmdb_movies['spoken_languages'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['director_name'] = credits['crew'].apply(get_director)
tmdb_movies['actor_1_name'] = credits['cast'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['actor_2_name'] = credits['cast'].apply(lambda x: safe_access(x, [1, 'name']))
tmdb_movies['actor_3_name'] = credits['cast'].apply(lambda x: safe_access(x, [2, 'name']))
tmdb_movies['companies_1'] = tmdb_movies['production_companies'].apply(lambda x: safe_access(x, [0, 'name']))
tmdb_movies['companies_2'] = tmdb_movies['production_companies'].apply(lambda x: safe_access(x, [1, 'name']))
tmdb_movies['companies_3'] = tmdb_movies['production_companies'].apply(lambda x: safe_access(x, [2, 'name']))
tmdb_movies['genres'] = tmdb_movies['genres'].apply(pipe_flatten_names)
tmdb_movies['keywords'] = tmdb_movies['keywords'].apply(pipe_flatten_names)
return tmdb_movies
# For list datatypes
def Obtain_list_Occurences(column_name, data):
listOcc = []
for i in data[column_name]:
split_genre = list(map(str, i.split('|')))
for j in split_genre:
if j not in listOcc:
listOcc.append(j)
return listOcc
def count_word(df, ref_col, liste):
keyword_count = dict()
for s in liste: keyword_count[s] = 0
for liste_keywords in df[ref_col].str.split('|'):
if type(liste_keywords) == float and pd.isnull(liste_keywords): continue
for s in [s for s in liste_keywords if s in liste]:
if pd.notnull(s): keyword_count[s] += 1
# convert the dictionary in a list to sort the keywords by frequency
keyword_occurences = []
for k,v in keyword_count.items():
keyword_occurences.append([k,v])
keyword_occurences.sort(key = lambda x:x[1], reverse = True)
return keyword_occurences, keyword_count
def TopTen(theList):
TopTen = list()
for i in range(0, 10):
TopTen.append(theList[i][0])
return TopTen
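# Illustrative usage of the helpers above (the 'genres' column exists in this dataset):
# genre_labels = Obtain_list_Occurences('genres', data)
# genre_occurences, _ = count_word(data, 'genres', genre_labels)
# top_genres = TopTen(genre_occurences)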
def show_heatmap(data):
plt.figure(figsize=(10, 10))
g = sns.heatmap(data[list(data)].corr(), annot=True, fmt=".2f", cmap="coolwarm", linewidths=0.01)
plt.show(g)
def show_joint(data, x, y):
plt.figure(figsize=(10, 10))
g = sns.jointplot(x=x, y=y, data=data);
plt.show(g)
def show_regplot(data, x, y):
plt.figure(figsize=(10, 10))
g = sns.regplot(x=x, y=y, data=data)
plt.show(g)
def to_frequency_table(data):
frequencytable = {}
for key in data:
if key in frequencytable:
frequencytable[key] += 1
else:
frequencytable[key] = 1
return frequencytable
movies = load_tmdb_movies("../dataset/tmdb_5000_movies.csv")
credits = load_tmdb_credits("../dataset/tmdb_5000_credits.csv")
data = convert_to_original_format(movies, credits)
# print(data.head())
# missing data
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from eslearn.utils.lc_read_write_mat import read_mat, write_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
headmotion_file = r'D:\WorkStation_2018\SZ_classification\Scale\头动参数_1322.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.groupby('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('诊断')['用药'].value_counts()
describe_sex_550 = scale_selected_550.groupby('诊断')['性别'].value_counts()
# Demographic
demographic_info_dataset1 = scale_selected_550[['folder', '诊断', '年龄', '性别', '病程月']]
headmotion = pd.read_excel(headmotion_file)
headmotion = headmotion[['Subject ID','mean FD_Power']]
demographic_info_dataset1 = pd.merge(demographic_info_dataset1, headmotion, left_on='folder', right_on='Subject ID', how='inner')
demographic_info_dataset1 = demographic_info_dataset1.drop(columns=['Subject ID'])
site_dataset1 = pd.DataFrame(np.zeros([len(demographic_info_dataset1),1]))
site_dataset1.columns = ['site']
demographic_dataset1_all = pd.concat([demographic_info_dataset1 , site_dataset1], axis=1)
demographic_dataset1_all.columns = ['ID','Diagnosis', 'Age', 'Sex', 'Duration', 'MeanFD', 'Site']
demographic_dataset1 = demographic_dataset1_all[['ID','Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']]
demographic_dataset1['Diagnosis'] = np.int32(demographic_dataset1['Diagnosis'] == 3)
# Duration and age
demographic_duration_dataset1 = demographic_dataset1_all[['Duration', 'Age']].dropna()
np.corrcoef(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
pearsonr(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
headmotion_file_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\parameters\FD_power'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.sum(Nscore, axis=1).describe()
Gscore = pd.DataFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = pd.DataFrame(Gscore)
Gscore = np.sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.groupby('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.groupby('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.groupby('group')['duration'].describe()
describe_sex_206 = scale_data_206.groupby('group')['sex'].value_counts()
# Demographic
uid = pd.DataFrame(scale_data_206['ID'])
uid['ID'] = uid['ID'].str.replace('NC','10');
uid['ID'] = uid['ID'].str.replace('SZ','20');
uid = pd.DataFrame(uid, dtype=np.int32)
demographic_info_dataset2 = scale_data_206[['group','age', 'sex']]
demographic_info_dataset2 = pd.concat([uid, demographic_info_dataset2], axis=1)
headmotion_name_dataset2 = os.listdir(headmotion_file_206)
headmotion_file_path_dataset2 = [os.path.join(headmotion_file_206, name) for name in headmotion_name_dataset2]
meanfd = []
for i, file in enumerate(headmotion_file_path_dataset2):
fd = np.loadtxt(file)
meanfd.append(np.mean(fd))
meanfd_dataset2 = pd.DataFrame(meanfd)
headmotion_name_dataset2 = pd.Series(headmotion_name_dataset2)
headmotion_name_dataset2 = headmotion_name_dataset2.str.findall('(NC.*[0-9]\d*|SZ.*[0-9]\d*)')
headmotion_name_dataset2 = [str(id[0]) if id != [] else 0 for id in headmotion_name_dataset2]
headmotion_name_dataset2 = pd.DataFrame([''.join(id.split('_')) if id != 0 else '0' for id in headmotion_name_dataset2])
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('NC','10');
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('SZ','20');
headmotion_name_dataset2 = pd.DataFrame(headmotion_name_dataset2, dtype=np.int32)
headmotion_name_dataset2 = pd.concat([headmotion_name_dataset2, meanfd_dataset2], axis=1)
headmotion_name_dataset2.columns = ['ID','meanFD']
demographic_dataset2 = pd.merge(demographic_info_dataset2, headmotion_name_dataset2, left_on='ID', right_on='ID', how='left')
site_dataset2 = pd.DataFrame(np.ones([len(demographic_dataset2),1]))
site_dataset2.columns = ['site']
demographic_dataset2 = pd.concat([demographic_dataset2, site_dataset2], axis=1)
demographic_dataset2.columns = ['ID', 'Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']
demographic_dataset2['Diagnosis'] = np.int32(demographic_dataset2['Diagnosis'] == 1)
duration_dataset2 = pd.concat([uid, scale_data_206['duration']], axis=1)
demographic_duration_dataset2 = pd.merge(duration_dataset2, demographic_dataset2, left_on='ID', right_on='ID')
demographic_duration_dataset2 = demographic_duration_dataset2.iloc[:106,:]
pearsonr(demographic_duration_dataset2['duration'], demographic_duration_dataset2['Age'])
#%% -------------------------COBRE----------------------------------
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC_COBRE' # all mat files directory
scale = r'H:\Data\精神分裂症\COBRE\COBRE_phenotypic_data.csv' # whole scale path
headmotion_file_COBRE = r'D:\WorkStation_2018\SZ_classification\Data\headmotion\cobre\HeadMotion.tsv'
duration_COBRE = r'D:\WorkStation_2018\SZ_classification\Scale\COBRE_duration.xlsx'
# Transform the .mat files to one .npy file
allmatname = os.listdir(matroot)
# Give labels to each subject, concatenate at the first column
allmatname = pd.DataFrame(allmatname)
allsubjname = allmatname.iloc[:,0].str.findall(r'[1-9]\d*')
allsubjname = pd.DataFrame([name[0] for name in allsubjname])
scale_data = pd.read_csv(scale,sep=',',dtype='str')
print(scale_data)
diagnosis = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type']]
scale_data = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Control'] = 0
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Patient'] = 1
include_loc = diagnosis['Subject Type'] != 'Disenrolled'
diagnosis = diagnosis[include_loc.values]
allsubjname = allsubjname[include_loc.values]
scale_data_COBRE = pd.merge(allsubjname, scale_data, left_on=0, right_on=0, how='inner').iloc[:,[0,1,2,3,5]]
scale_data_COBRE['Gender'] = scale_data_COBRE['Gender'].str.replace('Female', '0')
scale_data_COBRE['Gender'] = scale_data_COBRE['Gender'].str.replace('Male', '1')
scale_data_COBRE['Subject Type'] = scale_data_COBRE['Subject Type'].str.replace('Patient', '1')
scale_data_COBRE['Subject Type'] = scale_data_COBRE['Subject Type'].str.replace('Control', '0')
scale_data_COBRE = pd.DataFrame(scale_data_COBRE, dtype=np.float64)
describe_age_COBRE = scale_data_COBRE.groupby('Subject Type')['Current Age'].describe()
describe_sex_COBRE = scale_data_COBRE.groupby('Subject Type')['Gender'].value_counts()
headmotion_COBRE = pd.read_csv(headmotion_file_COBRE,sep='\t', index_col=False)
headmotion_COBRE = headmotion_COBRE[['Subject ID', 'mean FD_Power']]
scale_data['ID'] = pd.DataFrame(scale_data['ID'], dtype=np.int32)
demographic_COBRE = pd.merge(scale_data, headmotion_COBRE, left_on='ID', right_on='Subject ID', how='inner')
import numpy as np
import pandas as pd
import time
from pathlib import Path
from experiments.evaluation import calculate_metrics
from causal_estimators.ipw_estimator import IPWEstimator
from causal_estimators.standardization_estimator import \
StandardizationEstimator, StratifiedStandardizationEstimator
from experiments.evaluation import run_model_cv
from loading import load_from_folder
from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier
from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC
from sklearn.kernel_ridge import KernelRidge
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor,\
RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.simplefilter(action='ignore', category=UndefinedMetricWarning)
# warnings.filterwarnings("ignore", message="UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 due to no predicted samples. Use `zero_division` parameter to control this behavior.")
RESULTS_DIR = Path('results')
alphas = {'alpha': np.logspace(-4, 5, 10)}
# gammas = [] + ['scale']
Cs = np.logspace(-4, 5, 10)
d_Cs = {'C': Cs}
SVM = 'svm'
d_Cs_pipeline = {SVM + '__C': Cs}
max_depths = list(range(2, 10 + 1)) + [None]
d_max_depths = {'max_depth': max_depths}
d_max_depths_base = {'base_estimator__max_depth': max_depths}
Ks = {'n_neighbors': [1, 2, 3, 5, 10, 15, 25, 50, 100, 200]}
OUTCOME_MODEL_GRID = [
('LinearRegression', LinearRegression(), {}),
('LinearRegression_interact',
make_pipeline(PolynomialFeatures(degree=2, interaction_only=True),
LinearRegression()),
{}),
('LinearRegression_degree2',
make_pipeline(PolynomialFeatures(degree=2), LinearRegression()), {}),
# ('LinearRegression_degree3',
# make_pipeline(PolynomialFeatures(degree=3), LinearRegression()), {}),
('Ridge', Ridge(), alphas),
('Lasso', Lasso(), alphas),
('ElasticNet', ElasticNet(), alphas),
('KernelRidge', KernelRidge(), alphas),
('SVM_rbf', SVR(kernel='rbf'), d_Cs),
('SVM_sigmoid', SVR(kernel='sigmoid'), d_Cs),
('LinearSVM', LinearSVR(), d_Cs),
# (SVR(kernel='linear'), d_Cs), # doesn't seem to work (runs forever)
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='rbf'))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='sigmoid'))]),
d_Cs_pipeline),
('Standardized_LinearSVM', Pipeline([('standard', StandardScaler()), (SVM, LinearSVR())]),
d_Cs_pipeline),
('kNN', KNeighborsRegressor(), Ks),
# GaussianProcessRegressor(),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeRegressor(), d_max_depths),
# ('RandomForest', RandomForestRegressor(), d_max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostRegressor(base_estimator=DecisionTreeRegressor(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingRegressor(), d_max_depths),
# MLPRegressor(max_iter=1000),
# MLPRegressor(alpha=1, max_iter=1000),
]
PROP_SCORE_MODEL_GRID = [
('LogisticRegression_l2', LogisticRegression(penalty='l2'), d_Cs),
('LogisticRegression', LogisticRegression(penalty='none'), {}),
('LogisticRegression_l2_liblinear', LogisticRegression(penalty='l2', solver='liblinear'), d_Cs),
('LogisticRegression_l1_liblinear', LogisticRegression(penalty='l1', solver='liblinear'), d_Cs),
('LogisticRegression_l1_saga', LogisticRegression(penalty='l1', solver='saga'), d_Cs),
('LDA', LinearDiscriminantAnalysis(), {}),
('LDA_shrinkage', LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto'), {}),
('QDA', QuadraticDiscriminantAnalysis(), {}),
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
('SVM_rbf', SVC(kernel='rbf', probability=True), d_Cs),
('SVM_sigmoid', SVC(kernel='sigmoid', probability=True), d_Cs),
# ('SVM_linear', SVC(kernel='linear', probability=True), d_Cs), # doesn't seem to work (runs forever)
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVC(kernel='rbf', probability=True))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()),
(SVM, SVC(kernel='sigmoid', probability=True))]),
d_Cs_pipeline),
# ('Standardized_SVM_linear', Pipeline([('standard', StandardScaler()),
# (SVM, SVC(kernel='linear', probability=True))]),
# d_Cs_pipeline), # doesn't seem to work (runs forever)
('kNN', KNeighborsClassifier(), Ks),
# GaussianProcessClassifier(),
('GaussianNB', GaussianNB(), {}),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeClassifier(), d_max_depths),
# ('RandomForest', RandomForestClassifier(), max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingClassifier(), d_max_depths),
# MLPClassifier(max_iter=1000),
# MLPClassifier(alpha=1, max_iter=1000),
]
psid_gen_model, args = load_from_folder(dataset='lalonde_psid1')
cps_gen_model, args = load_from_folder(dataset='lalonde_cps1')
twins_gen_model, args = load_from_folder(dataset='twins')
psid_ate = psid_gen_model.ate(noisy=True)
psid_ite = psid_gen_model.ite(noisy=True).squeeze()
cps_ate = cps_gen_model.ate(noisy=True)
cps_ite = cps_gen_model.ite(noisy=True).squeeze()
twins_ate = twins_gen_model.ate(noisy=False)
twins_ite = twins_gen_model.ite(noisy=False).squeeze()
GEN_MODELS = [
('lalonde_psid', psid_gen_model, psid_ate, psid_ite),
('lalonde_cps', cps_gen_model, cps_ate, cps_ite),
('twins', twins_gen_model, twins_ate, twins_ite)
]
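# run_experiments_for_estimator (defined below) takes a get_estimator_func that wraps a
# sklearn model into a causal estimator. Illustrative sketch only -- the keyword names
# are assumptions, not taken from the causal_estimators package:
#   get_estimator_func = lambda m: StandardizationEstimator(outcome_model=m)   # outcome grid
#   get_estimator_func = lambda m: IPWEstimator(prop_score_model=m)            # prop-score grid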
t_start = time.time()
N_SEEDS_CV = 5
N_SEEDS_METRICS = 5
def run_experiments_for_estimator(get_estimator_func, model_grid, save_location,
meta_est_name, model_type, exclude=[],
gen_models=GEN_MODELS, n_seeds_cv=N_SEEDS_CV,
n_seeds_metrics=N_SEEDS_METRICS):
# if outcome_model_grid is None and prop_score_model_grid is None:
# raise ValueError('Either outcome_model_grid or prop_score_model_grid must be not None.')
# if outcome_model_grid is not None and prop_score_model_grid is not None:
# raise ValueError('Currently only supporting one non-None model grid.')
# outcome_modeling = outcome_model_grid is not None
# model_grid = outcome_model_grid if outcome_modeling else prop_score_model_grid
# model_type = 'outcome' if outcome_modeling else 'prop_score'
valid_model_types = ['outcome', 'prop_score']
if model_type not in valid_model_types:
raise ValueError('Invalid model_type... Valid model_types: {}'.format(valid_model_types))
param_str = 'params_' + model_type + '_model'
dataset_dfs = []
for gen_name, gen_model, ate, ite in gen_models:
print('DATASET:', gen_name)
dataset_start = time.time()
model_dfs = []
for model_name, model, param_grid in model_grid:
print('MODEL:', model_name)
if (gen_name, model_name) in exclude or model_name in exclude:
print('SKIPPING')
continue
model_start = time.time()
results = run_model_cv(gen_model, model, model_name=model_name, param_grid=param_grid,
n_seeds=n_seeds_cv, model_type=model_type, best_model=False, ret_time=False)
metrics_list = []
for params in results[param_str]:
try:
est_start = time.time()
estimator = get_estimator_func(model.set_params(**params))
metrics = calculate_metrics(gen_model, estimator, n_seeds=n_seeds_metrics,
conf_ints=False, ate=ate, ite=ite)
est_end = time.time()
# Add estimator fitting time in minutes
metrics['time'] = (est_end - est_start) / 60
metrics_list.append(metrics)
except ValueError:
print('Skipping {} params: {}'.format(model_name, params))
causal_metrics = pd.DataFrame(metrics_list)
            model_df = pd.concat([results, causal_metrics], axis=1)
import json
import pandas as pd
from .RelationshipHelper import RelationshipHelper
from .TwinHelper import TwinHelper
from .QueryHelper import QueryHelper
class DeployHelper:
def __init__(self, host_name, token_path=None, token=None):
self.__rh = RelationshipHelper(
host_name=host_name, token_path=token_path, token=token)
self.__th = TwinHelper(host_name=host_name, token=self.__rh.get_token())
self.__qh = QueryHelper(host_name=host_name, token=self.__rh.get_token())
##
# Deploy digital twins with a csv file.
# The columns should be 'modelid', 'dtid', 'init_property', 'init_component', 'rname', 'rtarget', 'init_rproperty'
# 'init_property', 'init_component' and 'init_rproperty' are optional columns.
# 'modelid': model ID
# 'dtid': Twin ID
# 'init_property': (JSON format) Can be empty, the initial value of properties
# 'init_component': (JSON format) Can be empty, the initial value of components
# 'rname': Relationship name, if 'rname' is specified, 'rtarget' is required.
# If multiple relationships are required, just add a new line without 'modelid' and using an existing 'dtid'.
# 'rtarget': Target twin ID if a relationship is specified
# 'init_rproperty': Initial value of properties of relationship if a relationship is specified.
##
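    # Illustrative CSV layout (all values made up):
    #   modelid,dtid,init_property,init_component,rname,rtarget,init_rproperty
    #   dtmi:example:Room;1,room1,"{""temperature"": 21}",,contains,sensor1,{}
    #   ,room1,,,adjacentTo,room2,
    # The second row has no 'modelid', so no twin is created; only the 'adjacentTo'
    # relationship is added for the existing twin 'room1'.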
def csv_deploy(self, path, atomic=True):
# read csv
df = pd.read_csv(path)
# a list for relationships, used for creating relationships after twins are creating.
relationships_storage = list()
# used for making this process atomic, if something failed, remove all things in these two list
if atomic:
succeed_twins = list()
succeed_relationships = list()
else:
failed_twins = list()
failed_relationships = list()
# check if optional columns exist
has_init_property = False
has_init_component = False
has_init_rproperty = False
for c in df.columns:
if c == 'init_property':
has_init_property = True
elif c == 'init_component':
has_init_component = True
elif c == 'init_rproperty':
has_init_rproperty = True
for _, row in df.iterrows():
modelid = row['modelid']
dtid = row['dtid']
# check empty data
if not has_init_property or pd.isna(row['init_property']):
init_property = {}
else:
init_property = json.loads(row['init_property'])
if not has_init_component or pd.isna(row['init_component']):
init_component = {}
else:
init_component = json.loads(row['init_component'])
if not has_init_rproperty or pd.isna(row['init_rproperty']):
init_rproperty = {}
else:
init_rproperty = json.loads(row['init_rproperty'])
rname = row['rname']
rtarget = row['rtarget']
if not pd.isna(modelid):
try:
self.__th.add_twin(
dtid=dtid,
model=modelid,
init_property=init_property,
init_component=init_component
)
print('Add DT: dtid={}, modelid={}'.format(dtid, modelid))
if atomic:
succeed_twins.append(dtid)
except Exception as e:
print('Exception:', e)
if atomic:
self.__atomic(succeed_twins)
return None
else:
failed_twins.append(
(modelid, dtid, init_property, init_component))
# avoid adding relationship before the target is created, store it first
            if not pd.isna(rtarget):
from tqdm import tqdm
import numpy as np
from scipy import sparse
import os
import gensim.models
import pandas as pd
import src.utils as utils
from sklearn.ensemble import RandomForestRegressor
from src.features.w2v import reduce_dimensions, plot_with_plotly
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
class Node2Vec():
def __init__(self, indir, n=1, p=2, q=1, walk_length=100, test=False, test_offset=0):
self.indir = indir
self.offset = test_offset
outdir = os.path.join(indir, 'walks')
if not os.path.exists(outdir):
os.mkdir(outdir)
fp = os.path.join(
outdir, f'node2vec_n={n}_p={p}_q={q}_wl={walk_length}.cor'
)
if test:
fp = os.path.join(
outdir, f'node2vec_n={n}_p={p}_q={q}_wl={walk_length}_test.cor'
)
self.corpus_path = fp
self.n = n
self.p = p
self.q = q
self.walk_length = walk_length
def get_api_neighbors_A(self, app):
"""Get all API neighbors of an APP from A matrix"""
assert app.startswith('app_')
app_id = int(app.split('_')[1])
neighbor_ids = np.nonzero(self.A_tr_csr[app_id])[1]
return np.array([f'api_{s}' for s in neighbor_ids])
def get_app_neighbors_A(self, api):
"""Get all APP neighbors of an API from A matrix"""
assert api.startswith('api_')
api_id = int(api.split('_')[1])
neighbor_ids = np.nonzero(self.A_tr_csc[:, api_id])[0]
return np.array([f'app_{s}' for s in neighbor_ids])
def get_api_neighbors_B(self, api):
"""Get all API neighbors of an API from B matrix"""
assert api.startswith('api_')
api_id = int(api.split('_')[1])
neighbor_ids = np.nonzero(self.B_tr[:, api_id])[0]
ls = [f'api_{s}' for s in neighbor_ids]
ls.remove(api)
return np.array(ls)
def get_api_neighbors_P(self, api):
"""Get all API neighbors of an API from P matrix"""
assert api.startswith('api_')
api_id = int(api.split('_')[1])
neighbor_ids = np.nonzero(self.P_tr[:, api_id])[0]
ls = [f'api_{s}' for s in neighbor_ids]
ls.remove(api)
return np.array(ls)
def all_neighbors_from_api(self, api):
"""Get all API neighbors of an APP from all matrices (B and P)"""
assert api.startswith('api_')
api_id = int(api.split('_')[1])
nbr_apis = np.concatenate([
self.get_api_neighbors_B(api),
self.get_api_neighbors_P(api)
])
nbr_apis = np.unique(nbr_apis)
nbr_apps = self.get_app_neighbors_A(api)
# weights later? no
return nbr_apis, nbr_apps
def perform_one_walk_full(self, p=1, q=1, walk_length=20, app=None):
path = []
if app is None:
app = 'app_' + str(np.random.choice(np.arange(self.A_tr_csr.shape[0])))
prev_nbrs = self.get_api_neighbors_A(app)
curr_node = np.random.choice(prev_nbrs)
prev_node = app
path.append(app)
path.append(curr_node)
for i in range(walk_length - 2):
if curr_node.startswith('api_'):
nbr_apis, nbr_apps = self.all_neighbors_from_api(curr_node)
curr_nbrs = np.concatenate([nbr_apis, nbr_apps])
elif curr_node.startswith('app_'):
curr_nbrs = self.get_api_neighbors_A(curr_node)
else: raise AssertionError
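            # Biased node2vec-style transition: the common neighbours of the previous
            # node (alpha_1) share total weight 1, returning to the previous node
            # (alpha_p) gets weight 1/p, and all remaining neighbours (alpha_q) share
            # total weight 1/q; the weights are normalised into probabilities below.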
alpha_1 = np.intersect1d(prev_nbrs, curr_nbrs, assume_unique=True)
alpha_p = prev_node
alpha_q = np.setdiff1d(
np.setdiff1d(curr_nbrs, alpha_1, assume_unique=True),
[alpha_p], assume_unique=True
)
alphas = [*alpha_1, alpha_p, *alpha_q]
assert len(alpha_1) + len(alpha_q) + 1 == len(curr_nbrs)
probs_q = np.full(len(alpha_q), 1/q/len(alpha_q)) if len(alpha_q) else []
probs_1 = np.full(len(alpha_1), 1/len(alpha_1)) if len(alpha_1) else []
probs = [*probs_1, 1/p, *probs_q]
probs = np.array(probs) / sum(probs)
new_node = np.random.choice(alphas, p=probs)
path.append(new_node)
prev_node = curr_node
prev_nbrs = curr_nbrs
curr_node = new_node
return path
def perform_one_walk_metapath(self, p=1, q=1, walk_length=20, app=None, metapath='APA'):
path = []
if metapath == 'APA':
path_stages = ['A', 'P']
if app is None:
app = 'app_' + str(np.random.choice(range(self.A_tr_csr.shape[0])))
prev_nbrs = self.get_api_neighbors_A(app)
curr_node = np.random.choice(prev_nbrs)
prev_node = app
path.append(app)
path.append(curr_node)
prev_stage = 'A'
for i in range(walk_length - 2):
stage = path_stages[
(path_stages.index(prev_stage) + 1) % len(path_stages)
]
print(prev_stage, stage)
# if curr_node.startswith('api_'):
# nbr_apis, nbr_apps = self.all_neighbors_from_api(curr_node)
# curr_nbrs = np.concatenate([nbr_apis, nbr_apps])
# elif curr_node.startswith('app_'):
# curr_nbrs = self.get_api_neighbors_A(curr_node)
# else: raise AssertionError
if stage.startswith('A'):
assert curr_node.startswith('app_')
curr_nbrs = self.get_api_neighbors_A(curr_node)
elif stage.startswith('B'):
assert curr_node.startswith('api_')
nbr_apps = self.get_app_neighbors_A(curr_node)
nbr_apis = self.get_api_neighbors_B(curr_node)
curr_nbrs = np.concatenate([nbr_apis, nbr_apps])
elif stage.startswith('P'):
assert curr_node.startswith('api_')
nbr_apps = self.get_app_neighbors_A(curr_node)
nbr_apis = self.get_api_neighbors_P(curr_node)
curr_nbrs = np.concatenate([nbr_apis, nbr_apps])
else: raise AssertionError
alpha_1 = np.intersect1d(prev_nbrs, curr_nbrs, assume_unique=True)
alpha_p = prev_node
alpha_q = np.setdiff1d(
np.setdiff1d(curr_nbrs, alpha_1, assume_unique=True),
[alpha_p], assume_unique=True
)
alphas = [*alpha_1, *alpha_q, alpha_p]
# print(len(alpha_1), len(alpha_q), len(curr_nbrs))
print(prev_node, curr_node)
# print(np.setdiff1d(alphas, curr_nbrs))
# print(np.setdiff1d(curr_nbrs, alphas))
assert len(alphas) == len(curr_nbrs)
probs_1 = np.full(len(alpha_1), 1/len(alpha_1)) if len(alpha_1) else []
probs_q = np.full(len(alpha_q), 1/q/len(alpha_q)) if len(alpha_q) else []
probs = [*probs_1, *probs_q, 1/p]
probs = np.array(probs) / sum(probs)
new_node = np.random.choice(alphas, p=probs)
if new_node in alpha_1:
prev_stage = prev_stage
elif new_node == alpha_p:
prev_stage = prev_stage
elif new_node in alpha_q:
prev_stage = stage
            else: raise RuntimeError('Sampled node is not in any alpha set')
path.append(new_node)
prev_node = curr_node
prev_nbrs = curr_nbrs
curr_node = new_node
return path
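    # As implemented above, the 'APA' metapath cycles between app-level ('A') and
    # API-level ('P') neighborhoods; the stage only advances when the walk moves to a
    # node outside the shared and previous neighbor sets (alpha_q), otherwise the
    # current stage is kept.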
def perform_walks(self, n, p, q, walk_length):
# n is how many paths from each app
n_apps_tr = self.A_tr_csr.shape[0]
walks = []
for app_i in tqdm(range(n_apps_tr)):
app = 'app_' + str(app_i)
for j in range(n):
path = self.perform_one_walk_full(p, q, walk_length, app=app)
walks.append(path)
return walks
def load_matrix(self):
indir = self.indir
A_tr = sparse.load_npz(os.path.join(indir, 'A_reduced_tr.npz'))
A_tst = sparse.load_npz(os.path.join(indir, 'A_reduced_tst.npz'))
B_tr = sparse.load_npz(os.path.join(indir, 'B_reduced_tr.npz'))
P_tr = sparse.load_npz(os.path.join(indir, 'P_reduced_tr.npz'))
meta_tr = pd.read_csv(os.path.join(indir, 'meta_tr.csv'), index_col=0)
meta_tst = pd.read_csv(os.path.join(indir, 'meta_tst.csv'), index_col=0)
assert 'csr_matrix' in str(type(A_tr))
self.A_tr_csr = A_tr
self.A_tr_csc = A_tr.tocsc(copy=True)
self.A_tst = A_tst
self.B_tr = B_tr
self.P_tr = P_tr
self.train_label = pd.read_csv(os.path.join(indir, 'meta_tr.csv'), index_col=None).rename(columns={'Unnamed: 0':'app_id'}).set_index('app_id')['label'].to_dict()
self.test_label = pd.read_csv(os.path.join(indir, 'meta_tst.csv'), index_col=None).rename(columns={'Unnamed: 0':'app_id'}).set_index('app_id')['label'].to_dict()
self.meta_tr = meta_tr
self.meta_tst = meta_tst
self.num_train = A_tr.shape[0]
def save_corpus(self):
walks = self.perform_walks(n=self.n, p=self.p, q=self.q, walk_length=self.walk_length)
# add an offset for every app if in test mode
if self.offset > 0:
            print('applying app id offset for test mode..')
for i in range(len(walks)):
walk = walks[i]
walks[i] = [
f"app_{int(node.split('_')[-1]) + self.offset}"
if node.startswith('app') else node
for node in walk
]
outfile = open(self.corpus_path, 'w')
print('saving..')
for walk in tqdm(walks):
outfile.write(' '.join(walk) + '\n')
outfile.close()
def create_model(self):
sentences = MyCorpus(self.corpus_path)
self.model = gensim.models.Word2Vec(
sentences=sentences, size=64, sg=1,
negative=5, window=3, iter=5, min_count=1
)
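    # Usage sketch (illustrative): after create_model(), node embeddings can be read
    # from the keyed vectors, e.g. self.model.wv['app_0'] or self.model.wv['api_42'],
    # and nearby nodes queried with self.model.wv.most_similar('app_0').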
def predict_embeddings(self):
X = []
Y = []
train_labels = []
for j in range(self.A_tr_csr.shape[0]):
indexes = np.nonzero((self.A_tr_csr[j]).toarray()[0])[0]
all_api = self.model.wv.vocab.keys()
matrix = np.zeros(64)
for i in indexes:
element = 'api_' + str(i)
if element in all_api:
matrix += self.model.wv[element]
matrix /= len(all_api)
X.append(matrix)
Y.append(self.model.wv['app_' + str(j)])
train_labels.append('app_' + str(j))
regressor = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100).fit(X, Y)
test_X = []
test_labels = []
for j in range(self.A_tst.shape[0]):
indexes = np.nonzero((self.A_tst[j]).toarray()[0])[0]
all_api = self.model.wv.vocab.keys()
matrix = np.zeros(64)
for i in indexes:
element = 'api_' + str(i)
if element in all_api:
matrix += self.model.wv[element]
matrix /= len(all_api)
test_X.append(matrix)
test_labels.append('app_' + str(j + self.A_tr_csr.shape[0]))
embeddings = regressor.predict(test_X)
for i in range(len(test_labels)):
self.model.wv[test_labels[i]] = embeddings[i]
self.train_embeddings = Y
self.test_embeddings = embeddings
self.train_labels = self.meta_tr.label == 'class1'
self.test_labels = self.meta_tst.label == 'class1'
def plot_embeddings(self):
x_vals, y_vals, labels = reduce_dimensions(self)
df_dict = {'x_vals': x_vals, 'y_vals': y_vals, 'labels': labels}
df = pd.DataFrame(df_dict)
graph_labels = {0: 'train_benign', 1: 'train_malware', 2: 'test_benign', 3: 'test_malware'}
df = df.replace({"labels": graph_labels})
graph_title = self.corpus_path.split('/')[-1].split('_')[0] + " two dimensional embeddings"
plot_with_plotly(df, graph_title)
def train_nn(self, num_epoch=5000):
train_X = torch.tensor(self.train_embeddings).float()
test_X = torch.tensor(self.test_embeddings).float()
train_Y = torch.tensor(self.train_labels).float()
test_Y = torch.tensor(self.test_labels).float()
net = Net(train_X.shape[1])
criterion = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adamax(net.parameters(), lr=0.0001)
y_pred = None
y_test_pred = None
for epoch in range(num_epoch): # loop over the dataset multiple times
running_loss = 0.0
y_pred = net(train_X)
y_pred = torch.squeeze(y_pred)
train_loss = criterion(y_pred, train_Y)
if epoch % 1000 == 0:
train_acc = calculate_accuracy(train_Y, y_pred)
y_test_pred = net(test_X)
y_test_pred = torch.squeeze(y_test_pred)
test_loss = criterion(y_test_pred, test_Y)
test_acc = calculate_accuracy(test_Y, y_test_pred)
print(
f'''epoch {epoch}
Train set - loss: {round_tensor(train_loss)}, accuracy: {round_tensor(train_acc)}
Test set - loss: {round_tensor(test_loss)}, accuracy: {round_tensor(test_acc)}
''')
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
print('Finished Training')
self.nn_train_pred = y_pred
self.nn_test_pred = y_test_pred
def evaluate(self):
cm = confusion_matrix(torch.tensor(self.test_labels).float().numpy()*1, self.nn_test_pred.ge(.5).view(-1).detach().numpy()*1)
        df_cm = pd.DataFrame(cm, index=['benign', 'malware'], columns=['benign', 'malware'])
# -*- coding: utf-8 -*-
'''
This code generates Fig. S5
The probability that cooling associated with anthropogenic aerosols has resulted in economic benefits at the country-level.
by <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import _env
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
import matplotlib
import geopandas as gp
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Helvetica'
def set_latlon_ticks(ax,m):
ax.set_xticks(np.arange(-160,161,40))
ax.set_xticklabels('')
ax.set_yticks(np.arange(-90,91,45))
ax.set_yticklabels('')
parallels = np.arange(-90.,91,45.)
m.drawparallels(parallels,labels=[True,False,False,False],dashes=[3,3],xoffset=5,linewidth = 0)
meridians = np.arange(-160,161,40.)
m.drawmeridians(meridians,labels=[True,False,False,True],dashes=[3,3],yoffset=5,linewidth = 0)
ds = 'ERA-Interim'
if_ctryshp = (_env.idir_root + '/shape/country/country1.shp')
odir_plot = _env.odir_root + '/plot/'
_env.mkdirs(odir_plot)
of_plot = odir_plot + 'ED_F5.Map_Ctry_GDP_Lost_Possibility.png'
fig = plt.figure(figsize=(21,10))
for iscen,scen in enumerate(_env.scenarios[1::]):
if_gdp = _env.odir_root + '/summary_'+ds+'/country_specific_statistics_GDP_' + ds + '_' + scen + '_Burke.xls'
if_ctrylist = _env.idir_root + '/regioncode/Country_List.xls'
itbl_gdp = pd.read_excel(if_gdp,'country-lag0')
itbl_gdp.set_index('iso',inplace = True)
ishp_ctry = gp.read_file(if_ctryshp)
ishp_ctry.loc[ishp_ctry['GMI_CNTRY'] == 'ROM','GMI_CNTRY'] = 'ROU'
ishp_ctry.loc[ishp_ctry['GMI_CNTRY'] == 'ZAR','GMI_CNTRY'] = 'COD'
ishp_ctry.set_index('GMI_CNTRY',inplace = True)
ishp_ctry['prob_damg'] = 1-itbl_gdp['probability_damage']
    ishp_ctry.loc[pd.isna(ishp_ctry['prob_damg'])
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
        result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
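# Usage sketch for ema (illustrative numbers): with n=3, m=1 and inputs 10, 11, 12,
# the recursion gives ema_0 = 10, ema_1 = (1*11 + 2*10)/3 ~= 10.33 and
# ema_2 = (1*12 + 2*10.33)/3 ~= 10.89.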
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling corelations.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
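# Usage sketch (illustrative): ts_rank(close, 5) returns, for each day, the rank of
# that day's value inside its own trailing 5-day window; a value of 5.0 means the
# latest observation is the highest of the last five.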
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE'])
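# Usage sketch for decay_linear (illustrative): with period=3 the weights are
# [1/6, 2/6, 3/6] (most recent observation weighted highest), so for the window
# [10, 11, 12] the result is (10*1 + 11*2 + 12*3)/6 ~= 11.33.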
def highday(df, n):  # number of periods since the maximum of the previous n values
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmax()
return result
def lowday(df, n):  # number of periods since the minimum of the previous n values
result = pd.Series(np.nan, index=df.index)
for i in range(n,len(df)):
result.iloc[i]= i - df[i-n:i].idxmin()
return result
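# Usage sketch (illustrative): highday(close, 20) (and lowday analogously) reports
# how many periods ago the extreme of the previous 20 observations occurred, e.g. a
# value of 3 means the 20-period maximum was seen 3 periods before the current one.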
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.drop("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = pd.DataFrame(stock_list.stack())
dataset.reset_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=pd.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average vwap data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average vwap data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.rename("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_df
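# The IndustryAverage_* helpers below follow the same template as IndustryAverage_vwap
# above: load (or initialise) a cached CSV keyed by trade date and industry, compute
# the per-stock quantity only for the dates missing from the cache, average it across
# the stocks of each industry, and write the cache back to disk.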
def IndustryAverage_close():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average close data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average close data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.rename("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_low():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average low data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average low data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.rename("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_volume():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average volume data needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average volume data needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.rename("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_df
def IndustryAverage_adv(num):
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_adv{num}.csv".format(num=num))
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average adv{num} data needs not to be updated.".format(num=num))
return result_industryaveraged_df
else:
print("The corresponding industry average adv{num} data needs to be updated.".format(num=num))
first_date_update = date_list_update[0]
except:
print("The corresponding industry average adv{num} data is missing.".format(num=num))
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.rename("ADV{num}_UNAVERAGED".format(num=num),inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".format(num=num)].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_adv{num}.csv".format(num=num),encoding='utf-8-sig')
return result_industryaveraged_df
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha048 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha048 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging across stocks in the industry per date
for date in date_list_update:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].mean()
result_industryaveraged_df.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_df.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_df
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.get_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].drop_duplicates()
date_list=local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
#check for building/updating/reading dataset
try:
result_industryaveraged_df = pd.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_df["TRADE_DATE"] = result_industryaveraged_df["TRADE_DATE"].astype(int)
result_industryaveraged_df.set_index("TRADE_DATE",inplace=True)
date_list_existed = pd.Series(result_industryaveraged_df.index)
date_list_update = date_list[~date_list.isin(date_list_existed)]
if len(date_list_update)==0:
print("The corresponding industry average data for alpha059 needs not to be updated.")
return result_industryaveraged_df
else:
print("The corresponding industry average data for alpha059 needs to be updated.")
first_date_update = date_list_update[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_df=pd.DataFrame(index=date_list,columns=industry_list)
date_list_update = date_list
first_date_update=0
#building/updating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the per-stock (un-averaged) data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.get_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].astype(int)
quotations_daily_chosen=quotations_daily_chosen.applymap(lambda x: np.nan if x=="NULL" else x)
try: #valid only in updating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_update].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.rename("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = pd.DataFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_update] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
                result_unaveraged_industry=pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#!/usr/bin/env python
# coding: utf-8
# # Challenge 5
#
# In this challenge, we will practice dimensionality reduction with PCA and feature selection with RFE. We will use the [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19) dataset, which originally contains 89 variables for more than 18 thousand players of the game FIFA 2019.
#
# > Note: please do not modify the names of the answer functions.
# ## General _setup_
# In[45]:
from math import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
import statsmodels.stats as st
from sklearn.decomposition import PCA
#from loguru import logger
# In[46]:
# Algumas configurações para o matplotlib.
#%matplotlib inline
#from IPython.core.pylabtools import figsize
#figsize(12, 8)
#sns.set()
# In[78]:
fifa = pd.read_csv("/home/gabriel/codenation/data-science-3/data.csv")
# In[79]:
columns_to_drop = ["Unnamed: 0", "ID", "Name", "Photo", "Nationality", "Flag",
"Club", "Club Logo", "Value", "Wage", "Special", "Preferred Foot",
"International Reputation", "Weak Foot", "Skill Moves", "Work Rate",
"Body Type", "Real Face", "Position", "Jersey Number", "Joined",
"Loaned From", "Contract Valid Until", "Height", "Weight", "LS",
"ST", "RS", "LW", "LF", "CF", "RF", "RW", "LAM", "CAM", "RAM", "LM",
"LCM", "CM", "RCM", "RM", "LWB", "LDM", "CDM", "RDM", "RWB", "LB", "LCB",
"CB", "RCB", "RB", "Release Clause"
]
try:
fifa.drop(columns_to_drop, axis=1, inplace=True)
except KeyError:
    print("Columns already dropped")
# ## Start your analysis here
# In[5]:
# Your analysis starts here.
# In[80]:
fifa.shape
# In[8]:
fifa.head()
# In[5]:
fifa.describe()
# In[49]:
def missing_values_table(df):
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum()/len(df)
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Importing dependencies
import os
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
pd.set_option('display.max_columns', None)
# In[2]:
# Path to source JSON
businessJson=os.path.join('sourceData', 'business.json')
# In[3]:
# Path to Yelp food/restaurant categories csv
yelpCategories=os.path.join('sourceData', 'yelpCategories.csv')
# In[4]:
# Creating pd dataframe
business_raw=pd.read_json(businessJson, lines=True)
# In[5]:
# Select only the businesses in Ontario
business_on=business_raw.loc[business_raw['state'] == 'ON']
# In[6]:
# Dropping any rows with blank values in these categories
business_on=business_on.dropna(subset=['name', 'address', 'postal_code', 'city', 'state', 'latitude', 'longitude', 'attributes',
'categories', 'hours']).reset_index(drop=True)
# In[7]:
# Regex to fix spelling mistakes
business_on.replace({'city': {'^AGINCOURT$': 'Agincourt',
'^Bradford West Gwillimbury$': 'Bradford',
'^East Ajax$': 'Ajax',
'^Caledon.{,8}$': 'Caledon',
'^East Gwil{1,2}imbury$': 'East Gwillimbury',
'(?i)^.*icoke$': 'Etobicoke',
'^.{,9}Toro?nto.{,9}$': 'Toronto',
'Malton': 'Mississauga',
'^.{,5}Missis{1,2}a?ua?g.{1,2}$': 'Mississauga',
'^Regional Municipality of York$': 'North York',
'(?i)^North.{0,2}York$': 'North York',
'^York Regional Municipality$': 'York',
'^Willowdale$': 'North York',
'^North of Brampton$': 'Brampton',
'(?i)^Oak.?ridges$': 'Oak Ridges',
'^oakville$': 'Oakville',
'(?i)^Richmond?.?Hill?$': 'Richmond Hill',
'^.{,8}Scar.?bo?rough$': 'Scarborough',
'^.{,11}Stouffville$': 'Stouffville',
'(?i)^Thornhil{,2}$': 'Thornhill',
'^.*Vaugh.{,3}$': 'Vaughan',
'^Wh.?i.?by$': 'Whitby'}}, inplace=True, regex=True)
# In[8]:
business=business_on.loc[business_on['city'].isin(['Unionville', 'Bolton', 'York', 'Bradford', 'Concord', 'East York', 'Stouffville',
'Woodbridge', 'Aurora', 'Ajax', 'Whitby', 'Pickering', 'Thornhill', 'Newmarket',
'Oakville', 'Etobicoke', 'North York', 'Scarborough', 'Vaughan', 'Richmond Hill',
'Brampton', 'Markham', 'Mississauga', 'Toronto'])].reset_index(drop=True)
# In[9]:
# Only taking these columns
business=business.loc[:, ['name', 'address', 'postal_code', 'city', 'latitude', 'longitude','categories', 'stars', 'hours','attributes']]
business.columns=['Name', 'Address', 'Postal_code', 'City', 'Latitude', 'Longitude', 'Categories', 'Stars', 'Hours', 'Attributes']
# ## Handling the hours column
# In[10]:
# Turning the hours column in to a df
hours_raw=json_normalize(data=business['Hours'])
business.drop(columns='Hours', inplace=True)
# In[11]:
# Reorganise columns
hours_raw=hours_raw.loc[:,['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']]
# In[12]:
# Create a new df with opening and closing hours
columnsHours=hours_raw.columns
hours=hours_raw
# In[13]:
# Loop through and split the columns
for column in columnsHours:
hours[[f"{column}_open", f"{column}_close"]]=hours_raw[column].str.split('-', expand=True)
hours.drop(columns=columnsHours, inplace=True)
hours=hours.apply(lambda x: x.str.strip())
# In[14]:
# # Create new list of column names and convert the time to minutes
columnsHours=hours.columns
for column in columnsHours:
hours[column]=hours[column].replace('$', ':00', regex=True)
    hours[column]=pd.to_timedelta(hours[column])
# -*- coding: utf-8 -*-
"""
Creates textual features from an intput paragraph
"""
# Load Packages
import textstat
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import pkg_resources
import ast
import spacy
#from collections import Counter
from pyphen import Pyphen
import pickle
#import xgboost
# load the language model from spacy; the model must be downloaded beforehand
nlp = spacy.load('en_core_web_md')
pyphen_dic = Pyphen(lang='en')
# set word lists to be used
## This corpus comes from the Cambridge English Corpus of spoken English and includes
## all the NGSL and SUP words needed to get 90% coverage.
NGSL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/NGSL_wordlist.txt')
])
## The Business Service List 1.0, also known as the BSL (<NAME>. & Culligan, B., 2016) is a list of approximately 1700 words
## that occur with very high frequency within the domain of general business English. Based on a 64.5 million word corpus of business
## texts, newspapers, journals and websites, the BSL 1.0 version gives approximately 97% coverage of general business English materials
## when learned in combination with the 2800 words of core general English in the New General Service List or NGSL (<NAME>., Culligan, B., and Phillips, J. 2013)
BSL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/BSL_wordlist.txt')
])
## New Academic Word List (NAWL): The NAWL is based on a carefully selected academic corpus of 288 million words.
NAWL_wordlist = set([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/NAWL_wordlist.txt')
])
## Load tf_idf score list
idf_list = list([
ln.decode('utf-8').strip() for ln in
pkg_resources.resource_stream('financial_readability', 'word_lists/dict_idf.txt')
])
idf_dict = ast.literal_eval(idf_list[0])
## Load the BOFIR model
with pkg_resources.resource_stream('financial_readability', 'models/bofir_model_5c.pickle.dat') as f:
bofir_model_5c = pickle.load(f)
with pkg_resources.resource_stream('financial_readability', 'models/bofir_model_3c.pickle.dat') as f:
bofir_model_3c = pickle.load(f)
#bofir_model_5c = pickle.load(open("bofir_model_5c.pickle.dat", "rb"))
#bofir_model_3c = pickle.load(open("bofir_model_3c.pickle.dat", "rb"))
#%%
# Create Features
class readability:
"""
Creates various text features for a paragraph
Methods
-------
syl_count(word=None)
Counts the number of syllables for a given word
linguistic_features(as_dict = False)
Returns the tokens and their linguistic features based on the spacy doc container
pos_onehot():
Creates the POS tag per token in a one-hot encoding.
dep_onehot():
Creates the dependency tag per token in a one-hot encoding.
wordlist_features(as_dict=False):
Creates word features based on word lists and the calculated tf-idf scores.
other_features(as_dict=False):
Returns dummies for the remaining spacy word features
classic_features(as_dict=False):
Returns the classic word features
check_word_list(token, word_list='NGSL'):
Function to check if token exists in specific word list.
check_tfidf(token):
Function to check if token exists in tf_idf list and return idf score.
tree_features(as_dict=False):
Function to create the tree based features.
semantic_features(as_dict=False):
Function to calculate the cumulative explained variance for PCA on the word embeddings.
word_features(embeddings = False):
Combines the featuresets to a Dataframe with all the 88 word-features.
paragraph_features(embed = False, as_dict = False):
Create the feature set over the total paragraph based on the
features estimated per word.
bofir(cat5 = True):
Use the paragraph features to calculate the BOFIR score for a
given paragraph.
readability_measures(as_dict = False):
Return the BOFIR score as well as other classic readability formulas for the paragraph.
"""
def __init__(self, paragraph):
self.paragraph = paragraph
# create the standard readability measures
self.flesch = textstat.flesch_reading_ease(self.paragraph)
# create a spacy doc container
self.doc = nlp(paragraph)
# Spacy text variables
self.token = [token.text for token in self.doc]
self.sent = [sentence.text for sentence in self.doc.sents]
self.lenght = [len(token.text) for token in self.doc]
self.lemma = [token.lemma_ for token in self.doc]
self.pos = [token.pos_ for token in self.doc]
self.tag = [token.tag_ for token in self.doc]
self.dep = [token.dep_ for token in self.doc]
self.like_email = [token.like_email for token in self.doc]
self.like_url = [token.like_url for token in self.doc]
self.is_alpha = [token.is_alpha for token in self.doc]
self.is_stop = [token.is_stop for token in self.doc]
self.ent_type = [token.ent_type_ for token in self.doc]
self.ent_pos = [token.ent_iob_ for token in self.doc]
self.word_vectors = [token.vector for token in self.doc]
self.vector_norm = [token.vector_norm for token in self.doc]
self.is_oov = [token.is_oov for token in self.doc]
# lexical chain - dependencies of words:
self.subtree_lenght = [len(list(token.subtree)) for token in self.doc]
self.n_left = [len(list(token.lefts)) for token in self.doc]
self.n_right = [len(list(token.rights)) for token in self.doc]
self.ancestors = [len(list(token.ancestors)) for token in self.doc]
self.children = [len(list(token.children)) for token in self.doc]
# count syllables per token
self.syllables = [self.syl_count(token.text) for token in self.doc]
# number of sentences and tokens
self.n_sentences = len(self.sent)
self.n_tokens = len(self.token)
def syl_count(self, word):
"""
Counts the number of syllables for a given word
Parameters
----------
word : str
The token to be analyzed
Returns
-------
count: integer
The number of syllables
"""
count = 0
split_word = pyphen_dic.inserted(word.lower())
count += max(1, split_word.count("-") + 1)
return count
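    # Illustrative note: syl_count relies on pyphen's hyphenation patterns, so a word
    # that pyphen splits into three parts is counted as three syllables, while words
    # it cannot split still count as at least one.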
def linguistic_features(self, as_dict = False):
"""
Function that returns the tokens and their linguistic features based on the spacy doc container
doc: spacy doc input
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
Text: The original word text.
Lemma: The base form of the word.
POS: The simple part-of-speech tag.
Tag: The detailed part-of-speech tag.
Dep: Syntactic dependency, i.e. the relation between tokens.
like_email: Does the token resemble an email address?
is_alpha: Does the token consist of alphabetic characters?
is stop: Is the token part of a stop list, i.e. the most common words of the language?
ent_type: Named entity type
ent_pos: IOB code of named entity tag.
vector_norm: The L2 norm of the token’s vector (the square root of
the sum of the values squared)
is_oov: Out-of-vocabulary
lexical chain variables determine the dependency tree:
        subtree_lenght: total number of nodes in the token's subtree
        n_left: number of connections to the left
        n_right: number of connections to the right
ancestors: number of nodes above
children: number of nodes below
syllables: number of syllables (only for words found in the dictionary)
"""
d = {'token':self.token,'lenght':self.lenght,'lemma':self.lemma,
'pos':self.pos,'tag':self.tag,
'dep':self.dep,'like_email':self.like_email,'like_url':self.like_url,
'stop':self.is_stop, 'alpha':self.is_alpha,
'ent_type':self.ent_type,'ent_pos':self.ent_pos,
'vector_norm':self.vector_norm,'oov':self.is_oov,
'subtree_lenght':self.subtree_lenght, 'n_left':self.n_left,
'n_right':self.n_right,'ancestors':self.ancestors,
'children':self.children,'syllables': self.syllables}
if as_dict:
return d
else:
return pd.DataFrame(d)
def pos_onehot(self):
"""
Creates the POS tag per token in a one-hot encoding. (To be agregated
over the paragraph or used as input into a RNN.)
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
ADJ adjective
ADP adposition
ADV adverb
AUX auxiliary
CONJ conjunction
CCONJ coordinating conjunction
DET determiner
INTJ interjection
NOUN noun
NUM numeral
PART particle
PRON pronoun
PROPN proper noun
PUNCT punctuation
SCONJ subordinating conjunction
SYM symbol
VERB verb
X other
SPACE space
"""
pos_tags_classes = ['ADJ', 'ADP', 'ADV','AUX', 'CONJ', 'CCONJ', 'DET',
'INTJ', 'JJS', 'NOUN', 'NUM', 'PART',
'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB',
'X', 'SPACE']
pos_tag_data = self.pos
# one hot encoding of the different POS tags
x = label_binarize(pos_tag_data, classes=pos_tags_classes)
output = pd.DataFrame(x, columns=pos_tags_classes)
return output
def dep_onehot(self):
"""
Creates the dependency tag per token in a one-hot encoding.
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables:
-------
acl clausal modifier of noun (adjectival clause)
acomp adjectival complement
advcl adverbial clause modifier
advmod adverbial modifier
agent agent
amod adjectival modifier
appos appositional modifier
attr attribute
aux auxiliary
auxpass auxiliary (passive)
case case marking
cc coordinating conjunction
ccomp clausal complement
compound compound
conj conjunct
cop copula
csubj clausal subject
csubjpass clausal subject (passive)
dative dative
dep unclassified dependent
det determiner
dobj direct object
expl expletive
intj interjection
mark marker
meta meta modifier
neg negation modifier
nn noun compound modifier
nounmod modifier of nominal
npmod noun phrase as adverbial modifier
nsubj nominal subject
nsubjpass nominal subject (passive)
nummod numeric modifier
oprd object predicate
obj object
obl oblique nominal
parataxis parataxis
pcomp complement of preposition
pobj object of preposition
poss possession modifier
preconj pre-correlative conjunction
prep prepositional modifier
prt particle
punct punctuation
quantmod modifier of quantifier
relcl relative clause modifier
root root
xcomp open clausal complement
"""
dep_tags_classes = ['acl', 'acomp', 'advcl','advmod', 'agent', 'amod',
'appos', 'attr', 'aux', 'auxpass', 'case', 'cc',
'ccomp', 'compound', 'conj', 'cop', 'csubj', 'csubjpass',
'dative', 'dep','det', 'dobj', 'expl',
'intj', 'mark', 'meta', 'neg', 'nn', 'nounmod', 'npmod',
'nsubj','nsubjpass', 'nummod', 'oprd',
'obj', 'obl', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj',
'prep','prt', 'punct', 'quantmod',
'relcl','root', 'xcomp']
# one hot encoding of the different DEP tags
x = label_binarize(self.dep, classes=dep_tags_classes)
output = pd.DataFrame(x, columns=dep_tags_classes)
return output
def wordlist_features(self, as_dict=False):
"""
Creates word features based on word lists and the calculated tf-idf scores.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
NGSL = [self.check_word_list(token.lower(), word_list='NGSL') for token in self.token]
BSL = [self.check_word_list(token.lower(), word_list='BSL') for token in self.token]
NAWL = [self.check_word_list(token.lower(), word_list='NAWL') for token in self.token]
idf = [self.check_tfidf(token.lower()) for token in self.token]
d = {'ngsl': NGSL,'bsl': BSL,'nawl': NAWL, 'idf': idf}
if as_dict:
return d
else:
return pd.DataFrame(d)
def other_features(self, as_dict=False):
"""
Returns dummies for the remaining spacy word features
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
# the dummy variables
is_entity = [1 if token != 'O' else 0 for token in self.ent_pos]
like_email = [1 if token == True else 0 for token in self.like_email]
like_url = [1 if token == True else 0 for token in self.like_url]
is_stop = [1 if token == True else 0 for token in self.is_stop]
is_alpha = [1 if token == True else 0 for token in self.is_alpha]
is_oov = [1 if token == True else 0 for token in self.is_oov]
d = {'is_entity': is_entity,'like_email': like_email,'like_url': like_url,
'is_stop': is_stop, 'is_alpha': is_alpha, 'is_oov': is_oov,
'vector_norm':self.vector_norm}
if as_dict:
return d
else:
return pd.DataFrame(d)
def classic_features(self, as_dict=False):
"""
Returns the classic word features
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
"""
large_words = [1 if syl >= 3 else 0 for syl in self.syllables]
polsyll = [1 if syl > 1 else 0 for syl in self.syllables]
# the dummy variables
d = {'syllables': self.syllables, 'large_word': large_words,
'polsyll':polsyll, 'lenght':self.lenght}
if as_dict:
return d
else:
return pd.DataFrame(d)
def check_word_list(self, token, word_list='NGSL'):
"""
Function to check if token exists in specific word list.
Parameters
----------
token : str
The token to be analyzed
word_list : str
Defines the word list to be considered (NGSL, BSL or NAWL); defaults to NGSL,
and any unrecognized value falls back to NAWL
Returns
-------
x: integer
Dummy (0 or 1) if word is in the specified word list
"""
if word_list=='NGSL':
word_set = NGSL_wordlist
elif word_list=='BSL':
word_set = BSL_wordlist
else:
word_set = NAWL_wordlist
if token not in word_set:
x = 0
else:
x=1
return x
def check_tfidf(self, token):
"""
Function to check if token exists in tf_idf list and return idf score.
Parameters
----------
token : str
The token to be analyzed
Returns
-------
value: integer
IDF value
"""
value = idf_dict.get(token, 0)
return value
def tree_features(self, as_dict=False):
"""
Function to create the tree based features.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables
-------
subtree_lenght
n_left
n_right
ancestors
children
"""
# lexical chain - dependencies of words:
self.subtree_lenght = [len(list(token.subtree)) for token in self.doc]
self.n_left = [len(list(token.lefts)) for token in self.doc]
self.n_right = [len(list(token.rights)) for token in self.doc]
self.ancestors = [len(list(token.ancestors)) for token in self.doc]
self.children = [len(list(token.children)) for token in self.doc]
d = {'subtree_lenght':self.subtree_lenght, 'n_left':self.n_left,'n_right':self.n_right,
'ancestors':self.ancestors,'children':self.children}
if as_dict:
return d
else:
return pd.DataFrame(d)
def semantic_features(self, as_dict=False):
"""
Function to calculate the cumulative explained variance for PCA on the word embeddings.
Parameters
----------
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables
Output Variables
-------
wordvec_1pc
wordvec_3pc
wordvec_10pc
"""
pca = PCA()
pca.fit(self.word_vectors)
explained_var = pd.DataFrame(pca.explained_variance_ratio_, columns=['expl_var'])
wordvec_1pc = np.sum(explained_var.iloc[0])
wordvec_3pc = np.sum(explained_var.iloc[0:3])
wordvec_10pc = np.sum(explained_var.iloc[0:10])
d = {'wordvec_1pc':wordvec_1pc,'wordvec_3pc':wordvec_3pc,'wordvec_10pc':wordvec_10pc}
if as_dict:
return d
else:
return pd.DataFrame(d)
def word_features(self, embeddings = False):
"""
Combines the featuresets to a Dataframe with
all the 88 word-features.
Parameters
----------
embeddings : boolean
Defines if the word embeddings (n=300) are included or not
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables. Each row represents a token
and the features are in the columns (n x 88) as there are 88 word-features
"""
classic_features = self.classic_features()
pos_features = self.pos_onehot()
dep_features = self.dep_onehot()
wordlist_features = self.wordlist_features()
other_features = self.other_features()
tree_features = self.tree_features()
if embeddings:
nameslist = ["V{:02d}".format(x+1) for x in range(300)]
word_embeddings = pd.DataFrame(self.word_vectors, columns = nameslist)
return pd.concat([classic_features,pos_features,dep_features, wordlist_features,
other_features,tree_features,word_embeddings], axis=1)
else:
return pd.concat([classic_features, pos_features,dep_features, wordlist_features,
other_features,tree_features], axis=1)
def paragraph_features(self, embed = False, as_dict = False):
"""
Create the feature set over the total paragraph based on the
features estimated per word.
Parameters
----------
embed : boolean
Defines if the word embeddings (n=300) are included or not
as_dict : boolean
Defines if output is a dataframe or dict
Returns
-------
d: pandas DataFrame
Dataframe with all the Output Variables. Each row represents a feature.
columns:
cat: feature category
value: value of the feature
"""
# word embeddings
word_embeddings_raw = pd.DataFrame(self.word_vectors, columns = ["V{:02d}".format(x+1) for x in range(300)])
# create all datasets with the mean word values
classic_features = pd.DataFrame(self.classic_features().mean(), columns= ['value'])
classic_features['cat'] = 'classic'
dep_features = pd.DataFrame(self.dep_onehot().mean(), columns= ['value'])
dep_features['cat'] = 'dep'
wordlist_features = pd.DataFrame(self.wordlist_features().mean(), columns= ['value'])
wordlist_features['cat'] = 'classic'
pos_features = pd.DataFrame(self.pos_onehot().mean(), columns= ['value'])
pos_features['cat'] = 'pos'
tree_features = pd.DataFrame(self.tree_features().mean(), columns= ['value'])
tree_features['cat'] = 'tree'
other_features = pd.DataFrame(self.other_features().mean(), columns= ['value'])
other_features['cat'] = 'classic'
semantic_features = pd.DataFrame(self.semantic_features().mean(), columns= ['value'])
semantic_features['cat'] = 'semantic'
word_embeddings = pd.DataFrame(word_embeddings_raw.mean(), columns= ['value'])
word_embeddings['cat'] = 'embeddings'
if embed:
temp_df = pd.concat([classic_features, dep_features, wordlist_features, pos_features, other_features,
tree_features,semantic_features, word_embeddings], axis=0)
else:
temp_df = pd.concat([classic_features, dep_features, wordlist_features, pos_features, other_features,
tree_features,semantic_features], axis=0)
temp_df['var'] = temp_df.index
# add standard features that are not based on word features
paragraph_features = | pd.DataFrame(columns=['var','value', 'cat']) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confusion_matrix
from functools import partial
import scipy as sp
import matplotlib.pyplot as plt
#from matplotlib_venn import venn2
import lightgbm as lgb
from sklearn import preprocessing
import seaborn as sns
import gc
import psutil
import os
from IPython.display import FileLink
import statistics
import json
import ast
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_validate
import collections
import random
import functools
from sklearn.metrics import roc_curve,auc,accuracy_score,confusion_matrix,f1_score,classification_report
from sklearn.metrics import mean_squared_error
# The metric in question
from sklearn.metrics import cohen_kappa_score
import copy
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from distutils.util import strtobool
import math
from scipy.sparse import csr_matrix, save_npz, load_npz
from typing import Union
from sklearn.decomposition import PCA
#import dask.dataframe as dd
import re
from sklearn.cluster import KMeans
from contextlib import contextmanager
from collections import deque
#import eli5
#from eli5.sklearn import PermutationImportance
import shutil
import array
#import sqlite3
#from tsfresh.utilities.dataframe_functions import roll_time_series
#from tsfresh import extract_features
SEED_NUMBER=2020
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
set_seed(SEED_NUMBER)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.max_rows', 1000)
EMPTY_NUM=-999
# https://github.com/lopuhin/kaggle-imet-2019/blob/master/imet/utils.py#L17
ON_KAGGLE = False#'KAGGLE_URL_BASE'in os.environ
#print(" os.environ :", os.environ)
print("ON_KAGGLE:", ON_KAGGLE)
if not ON_KAGGLE:
#import slackweb
try:
import wandb
from wandb.lightgbm import wandb_callback
except:
print(f"error : cannot import wandb")
else:
import warnings
warnings.simplefilter('ignore')
PROJECT_NAME = "probspace_kiva"
INPUT_DIR = Path("../data/raw")
PROC_DIR = Path("../data/proc")
LOG_DIR = Path("../data/log")
OUTPUT_DIR = Path("../data/submission")
PATH_TO_GRAPH_DIR=Path("../data/graph")
PATH_TO_MODEL_DIR=Path("../data/model")
PATH_TO_UPLOAD_MODEL_PARENT_DIR=Path("../data/model")
PATH_TO_FEATURES_DIR=Path("../data/features")
class Colors:
"""Defining Color Codes to color the text displayed on terminal.
"""
blue = "\033[94m"
green = "\033[92m"
yellow = "\033[93m"
red = "\033[91m"
end = "\033[0m"
def color(string: str, color: Colors = Colors.yellow) -> str:
return f"{color}{string}{Colors.end}"
@contextmanager
def timer2(label: str) -> None:
"""compute the time the code block takes to run.
"""
p = psutil.Process(os.getpid())
start = time.time() # Setup - __enter__
m0 = p.memory_info()[0] / 2. ** 30
print(color(f"{label}: Start at {start}; RAM USAGE AT START {m0}"))
try:
yield # yield to body of `with` statement
finally: # Teardown - __exit__
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
end = time.time()
print(color(f"{label}: End at {end} ({end - start}[s] elapsed); RAM USAGE AT END {m1:.2f}GB ({sign}{delta:.2f}GB)", color=Colors.red))
@contextmanager
def trace(title):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
yield
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr)
def cpu_dict(my_dictionary, text=None):
size = sys.getsizeof(json.dumps(my_dictionary))
#size += sum(map(sys.getsizeof, my_dictionary.values())) + sum(map(sys.getsizeof, my_dictionary.keys()))
print(f"{text} size : {size}")
def cpu_stats(text=None):
#if not ON_KAGGLE:
pid = os.getpid()
py = psutil.Process(pid)
memory_use = py.memory_info()[0] / 2. ** 30
print('{} memory GB:'.format(text) + str(memory_use))#str(np.round(memory_use, 2)))
def reduce_mem_Series(se, verbose=True, categories=False):
numeric2reduce = ["int16", "int32", "int64", "float64"]
col_type = se.dtype
best_type = None
if (categories==True) & (col_type == "object"):
se = se.astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
se = pd.to_numeric(se, downcast=downcast)
best_type = se.dtype.name
if verbose and best_type is not None and best_type != str(col_type):
print(f"Series '{se.index}' converted from {col_type} to {best_type}")
return se
def reduce_mem_usage(df, verbose=True, categories=False):
# All types that we want to change for "lighter" ones.
# int8 and float16 are not included because we cannot reduce
# those data types any further.
# float32 is not included because float16 has too low precision.
numeric2reduce = ["int16", "int32", "int64", "float64"]
start_mem = 0
if verbose:
start_mem = df.memory_usage().sum() / 1024**2
#start_mem = memory_usage_mb(df, deep=deep)
for col, col_type in df.dtypes.iteritems():
best_type = None
if (categories==True) & (col_type == "object"):
df[col] = df[col].astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
df[col] = pd.to_numeric(df[col], downcast=downcast)
best_type = df[col].dtype.name
# Log the conversion performed.
if verbose and best_type is not None and best_type != str(col_type):
print(f"Column '{col}' converted from {col_type} to {best_type}")
if verbose:
#end_mem = memory_usage_mb(df, deep=deep)
end_mem = df.memory_usage().sum() / 1024**2
diff_mem = start_mem - end_mem
percent_mem = 100 * diff_mem / start_mem
print(f"Memory usage decreased from"
f" {start_mem:.2f}MB to {end_mem:.2f}MB"
f" ({diff_mem:.2f}MB, {percent_mem:.2f}% reduction)")
return df
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.6f} s')
def normal_sampling(mean, label_k, std=2, under_limit=1e-15):
val = math.exp(-(label_k-mean)**2/(2*std**2))/(math.sqrt(2*math.pi)*std)
if val < under_limit:
val = under_limit
return val
def compHist(np_oof, np_y_pred, np_y_true, title_str):
np_list = [np_oof, np_y_true, np_y_pred]
label_list = ["oof", "true", "pred"]
color_list = ['red', 'blue', 'green']
for np_data, label, color in zip(np_list, label_list, color_list):
sns.distplot(
np_data,
#bins=sturges(len(data)),
color=color,
kde=True,
label=label
)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_compHist.png"))
plt.close()
def compPredTarget(y_pred, y_true, index_list, title_str, lm_flag=False):
df_total = pd.DataFrame({"Prediction" : y_pred.flatten(),
"Target" : y_true.flatten(),
"Difference" : y_true.flatten() -y_pred.flatten()
#"type" : np.full(len(y_pred), "oof")
}, index=index_list)
print(df_total)
print("Difference > 0.1 : ", df_total[np.abs(df_total["Difference"]) > 0.1].Difference.count())
#print(df_total[df_total["type"]=="valid_train"].Difference)
fig = plt.figure()
sns.displot(df_total.Difference,bins=10)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_diff_distplot.png"))
plt.close()
#pdb.set_trace()
if lm_flag:
plt.figure()
fig2 = sns.lmplot(x="Target", y="Prediction", data=df_total, palette="Set1")
#fig.set_axis_labels('target', 'pred')
plt.title(title_str)
plt.tight_layout()
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_true_lm.png"))
plt.close()
def dimensionReductionPCA(df, _n_components, prefix="PCA_"):
pca = PCA(n_components=_n_components)
pca.fit(df)
reduced_feature = pca.transform(df)
df_reduced = pd.DataFrame(reduced_feature, columns=[f"{prefix}{x + 1}" for x in range(_n_components)], index=df.index)
print(f"df_reduced:{df_reduced}")
df_tmp = pd.DataFrame(pca.explained_variance_ratio_, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
print(df_tmp)
import matplotlib.ticker as ticker
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list( np.cumsum(pca.explained_variance_ratio_)), "-o")
plt.xlabel("Number of principal components")
plt.ylabel("Cumulative contribution rate")
plt.grid()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA.png")
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
# df_comp = pd.DataFrame(pca.components_, columns=df.columns, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
# print(df_comp)
# plt.figure(figsize=(6, 6))
# for x, y, name in zip(pca.components_[0], pca.components_[1], df.columns):
# plt.text(x, y, name)
# plt.scatter(pca.components_[0], pca.components_[1], alpha=0.8)
# plt.grid()
# plt.xlabel("PC1")
# plt.ylabel("PC2")
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA_scatter.png")
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
return df_reduced
def addNanPos(df, cols_list:list, suffix="nan_pos"):
for col in cols_list:
if df[col].isnull().any():
df["{}_{}".format(col, suffix)] = df[col].map(lambda x: 1 if pd.isna(x) else 0)
return df
def get_feature_importances(X, y, shuffle=False):
# Shuffle the target variable if necessary
if shuffle:
y = np.random.permutation(y)
# Train the model
clf = RandomForestClassifier(random_state=42)
clf.fit(X, y)
# Build a dataframe containing the feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = X.columns
imp_df["importance"] = clf.feature_importances_
return imp_df.sort_values("importance", ascending=False)
def nullImporcance(df_train_X, df_train_y, th=80, n_runs=100):
# Train the model on the actual target and build a dataframe of feature importances
actual_imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=False)
# Train the model on a shuffled target and build a dataframe of (null) feature importances
N_RUNS = n_runs
null_imp_df = pd.DataFrame()
for i in range(N_RUNS):
print("run : {}".format(i))
imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=True)
imp_df["run"] = i + 1
null_imp_df = pd.concat([null_imp_df, imp_df])
def display_distributions(actual_imp_df, null_imp_df, feature, path_to_save_dir):
# Get the importances for the given feature
actual_imp = actual_imp_df.query("feature == '{}'".format(feature))["importance"].mean()
null_imp = null_imp_df.query("feature == '{}'".format(feature))["importance"]
# Visualization
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
a = ax.hist(null_imp, label="Null importances")
ax.vlines(x=actual_imp, ymin=0, ymax=np.max(a[0]), color='r', linewidth=10, label='Real Target')
ax.legend(loc="upper right")
ax.set_title("Importance of {}".format(feature), fontweight='bold')
plt.xlabel("Null Importance Distribution for {}".format(feature))
plt.ylabel("Importance")
plt.show()
path_to_save = os.path.join(str(path_to_save_dir), "null_imp_{}".format(feature))
plt.savefig(path_to_save)
# Display the null-importance distributions for the features with the highest importance on the actual data (top 5)
for feature in actual_imp_df["feature"]:
display_distributions(actual_imp_df, null_imp_df, feature, PATH_TO_GRAPH_DIR)
# Set the threshold
THRESHOLD = th
# Collect the features that exceed the threshold
null_features = []
for feature in actual_imp_df["feature"]:
print("Null :: {}".format(feature))
actual_value = actual_imp_df.query("feature=='{}'".format(feature))["importance"].values
null_value = null_imp_df.query("feature=='{}'".format(feature))["importance"].values
percentage = (null_value < actual_value).sum() / null_value.size * 100
print("actual_value: {}, null_value : {}, percentage : {}".format(actual_value, null_value, percentage))
if percentage < THRESHOLD and (100-THRESHOLD) < percentage:
null_features.append(feature)
return null_features
def makeFourArithmeticOperations(df, col1, col2):
new_col = "auto__{}_add_{}".format(col1, col2)
df[new_col] = df[col1] + df[col2]
new_col = "auto__{}_diff_{}".format(col1, col2)
df[new_col] = df[col1] - df[col2]
new_col = "auto__{}_multiply_{}".format(col1, col2)
df[new_col] = df[col1] * df[col2]
new_col = "auto__{}_devide_{}".format(col1, col2)
df[new_col] = df[col1] / df[col2]
return df
def procAgg(df:pd.DataFrame, base_group_col:str, agg_col:str, agg_list:list):
for agg_func in agg_list:
new_col = "auto__{}_{}_agg_by_{}".format(agg_col, agg_func, base_group_col)
map_dict = df.groupby(base_group_col)[agg_col].agg(agg_func)
print(new_col)
print(map_dict)
df[new_col] = df[base_group_col].map(map_dict)
df[new_col] = reduce_mem_Series(df[new_col])
#df = makeFourArithmeticOperations(df, new_col, agg_col)
return df
def aggregationFE(df:pd.DataFrame, base_group_cols:list, agg_cols:list, agg_func_list:list=['count', 'max', 'min', 'sum', 'mean', "nunique", "std", "median", "skew"]):
for b in base_group_cols:
for a in agg_cols:
df = procAgg(df, b, a, agg_func_list)
return df
def makeInteractionColumn(df:pd.DataFrame, inter_cols:list):
print(inter_cols)
for c in inter_cols:
col_name = "inter_" + "_".join(c)
print(col_name)
#df[col_name] = "_"
for i, col in enumerate(c):
print(col)
if i == 0:
df[col_name] = df[col]
else:
#
#print(df[col])
df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#print(df[col_name].unique())
print("****")
return df
def interactionFE(df:pd.DataFrame, cols:list=[], inter_nums:list=[]):
for inter_num in inter_nums:
inter_cols = itertools.combinations(cols, inter_num)
df = makeInteractionColumn(df, inter_cols)
# for c in itertools.combinations(cols, inter_num):
#
# col_name = "inter_" + "_".join(c)
# print(col_name)
# #df[col_name] = "_"
#
# for i, col in enumerate(c):
# print(col)
# if i == 0:
# df[col_name] = df[col]
# else:
# #
# #print(df[col])
# df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#
# print(df[col_name].unique())
return df
def interactionFEbyOne(df:pd.DataFrame, inter_col:str, target_cols:list, inter_nums:list=[1]):
for inter_num in inter_nums:
comb = itertools.combinations(target_cols, inter_num)
for c in comb:
if not inter_col in c:
inter_cols = (inter_col,) + c
print(inter_cols)
df = makeInteractionColumn(df, [inter_cols])
return df
def calcSmoothingParam(num_of_data, k=100, f=100):
param = 1 / (1 + np.exp(-(num_of_data - k)/f))
return param
def calcSmoothingTargetMean(df:pd.DataFrame, col_name, target_name):
#print(df[target_name])
all_mean = df[target_name].mean()
#print(all_mean)
#sys.exit()
df_vc = df[col_name].value_counts()
gp_mean_dict = df.groupby(col_name)[target_name].mean()
smooth_target_mean = df_vc.copy()
for key, val in gp_mean_dict.items():
n=df_vc[key]
param = calcSmoothingParam(num_of_data=n)
smooth = param * val + (1-param)*all_mean
smooth_target_mean[key] = smooth
print("label : {}, n = {}, val={}, all = {}, param = {}, final={}".format(key, n, val, all_mean, param, smooth))
del df_vc  # keep smooth_target_mean, it is returned below
gc.collect()
return smooth_target_mean
def targetEncoding(df_train_X, df_train_y, encoding_cols:list, _n_splits=4, smooth_flag=0):
dict_all_train_target_mean = {}
for c in encoding_cols:
# print("Target Encoding : {}".format(c))
# print(f"df_train_X[c] : {df_train_X[c].shape}")
# print(f"df_train_y : {df_train_y.shape}")
#df_data_tmp = pd.DataFrame({c: df_train_X[c], "target":df_train_y})
df_data_tmp = pd.DataFrame(df_train_X[c])
df_data_tmp["target"] = df_train_y#.loc[:,0]
#nan_mean= -999#df_data_tmp["target"].mean()
nan_mean=df_data_tmp["target"].mean()
if smooth_flag:
all_train_target_mean=calcSmoothingTargetMean(df_data_tmp, c, "target")
else:
all_train_target_mean = df_data_tmp.groupby(c)["target"].mean()
dict_all_train_target_mean[c] = all_train_target_mean
#print(all_train_target_mean)
#df_test_X[c] = df_test_X[c].map(all_train_target_mean)
tmp = np.repeat(np.nan, df_train_X.shape[0])
kf = KFold(n_splits=_n_splits, shuffle=True, random_state=0)
for idx_1, idx_2 in kf.split(df_train_X):
if smooth_flag:
target_mean=calcSmoothingTargetMean(df_data_tmp.iloc[idx_1], c, "target")
else:
target_mean = df_data_tmp.iloc[idx_1].groupby(c)["target"].mean()
tmp[idx_2] = df_train_X[c].iloc[idx_2].map(target_mean)
idx_1_unique = df_data_tmp.iloc[idx_1][c].unique()
idx_2_unique = df_data_tmp.iloc[idx_2][c].unique()
for c2 in idx_2_unique:
if not c2 in idx_1_unique:
pass
#print("TARGET ENCORDING ERROR {}: {} replace to {}".format(c, c2, nan_mean))
df_train_X[c] = tmp
df_train_X[c].fillna(value=nan_mean, inplace=True)
#print(df_train_X.loc[df_train_X[c].isnull(), c])
#showNAN(df_train_X)
return df_train_X, dict_all_train_target_mean
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def calc_smoothing(se_gp_count, se_gp_mean, prior, min_samples_leaf=1, smoothing=1):
smoothing = 1 / (1 + np.exp(-(se_gp_count - min_samples_leaf) / smoothing))
se_smoothing_mean = prior * (1 - smoothing) + se_gp_mean * smoothing
return se_smoothing_mean #, smoothing
def TEST__calc_smoothing():
se_count = pd.Series(np.arange(2000000))
cpu_stats("before calc_smoothing")
se_smoothing_mean = calc_smoothing(se_count, se_count, prior=50, min_samples_leaf=100, smoothing=300)
cpu_stats("after calc_smoothing")
#fig = plt.Figure()
plt.plot(se_count, se_smoothing_mean, label="smoothed mean")
plt.show()
def target_encode_with_smoothing(trn_series=None,
#tst_series=None,
target_se=None,
min_samples_leaf=1,
smoothing=1,
#noise_level=0,
agg_val="mean",
):
"""
from https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features/notebook
Smoothing is computed like in the following paper by <NAME>
https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
trn_series : training categorical feature as a pd.Series
tst_series : test categorical feature as a pd.Series
target : target data as a pd.Series
min_samples_leaf (int) : minimum samples to take category average into account
smoothing (int) : smoothing effect to balance categorical average vs prior
"""
assert len(trn_series) == len(target_se)
#assert trn_series.name == tst_series.name
temp = pd.concat([trn_series, target_se], axis=1)
# Compute target mean
averages = temp.groupby(by=trn_series.name)[target_se.name].agg([agg_val, "count"])
# Compute smoothing
smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
# Apply average function to all target data
if agg_val == "mean":
prior = target_se.mean()
elif agg_val == "std":
prior = target_se.std()
# The bigger the count the less full_avg is taken into account
averages[target_se.name] = prior * (1 - smoothing) + averages[agg_val] * smoothing
averages.drop([agg_val, "count"], axis=1, inplace=True)
return averages
# # Apply averages to trn and tst series
# ft_trn_series = pd.merge(
# trn_series.to_frame(trn_series.name),
# averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
# on=trn_series.name,
# how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# # pd.merge does not keep the index so restore it
# ft_trn_series.index = trn_series.index
# ft_tst_series = pd.merge(
# tst_series.to_frame(tst_series.name),
# averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
# on=tst_series.name,
# how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
#ft_tst_series.index = tst_series.index
#return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level)
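# Usage sketch for target_encode_with_smoothing (illustration only; the toy series below
# are assumptions and not data from this competition).
def TEST__target_encode_with_smoothing():
    trn = pd.Series(["a", "a", "a", "b", "b", "c"], name="cat_col")
    target = pd.Series([1, 0, 1, 0, 0, 1], name="target")
    # categories with few samples are pulled towards the global prior (the overall mean)
    averages = target_encode_with_smoothing(trn_series=trn, target_se=target, min_samples_leaf=2, smoothing=1)
    print(averages)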
def showNAN(df):
# print(df.isnull())
# df.isnull().to_csv(PROC_DIR/'isnull.csv')
# print(df.isnull().sum())
# df.isnull().sum().to_csv(PROC_DIR/'isnull_sum.csv')
# total = df.isnull().sum().sort_values(ascending=False)
# print(total)
# print(f"count : {df.isnull().count()}")
# percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
# missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
#df=df.replace([np.inf, -np.inf], np.nan)
nan_dict = {}
for col in df.columns:
total = df[col].isnull().sum()
percent = total / df[col].isnull().count()
nan_dict[col] = [total, percent]
missing_data = pd.DataFrame(nan_dict, index=['Total', 'Percent']).T
missing_data = missing_data.sort_values('Percent', ascending=False)
nan_data = missing_data.loc[missing_data["Percent"] > 0, :]
if not ON_KAGGLE:
print("****show nan*****")
print(nan_data)
print("****show nan end*****\n")
nan_list = list(nan_data.index)
#del missing_data, nan_data
#gc.collect()
return nan_list
def accumAdd(accum_dict, dict_key_name, add_val, _empty_val=EMPTY_NUM):
if accum_dict[dict_key_name] == _empty_val:
accum_dict[dict_key_name] = add_val
else:
accum_dict[dict_key_name] += add_val
return accum_dict
def getColumnsFromParts(colums_parts, base_columns):
new_cols=[]
for col_p in colums_parts:
for col in base_columns:
if col_p in col:
if not col in new_cols:
new_cols.append(col)
#print("find from the part : {}".format(col))
return new_cols.copy()
def checkCorreatedFeatures(df, exclude_columns=[], th=0.995, use_cols=[]):
counter = 0
to_remove = []
if len(use_cols)==0:
use_cols = df.columns
for feat_a in use_cols:
if feat_a in exclude_columns:
continue
for feat_b in df.columns:
if feat_b in exclude_columns:
continue
if feat_a != feat_b and feat_a not in to_remove and feat_b not in to_remove:
#print('{}: FEAT_A: {}, FEAT_B: {}'.format(counter, feat_a, feat_b))
c = np.corrcoef(df[feat_a], df[feat_b])[0][1]
if c > th:
counter += 1
to_remove.append(feat_b)
print('{}: FEAT_A: {}, FEAT_B (removed): {} - Correlation: {}'.format(counter, feat_a, feat_b, c))
return to_remove.copy()
def addStrToLastWithoutContinuous(chain_string, added_str, splitter="_"):
string_list = chain_string.split(splitter)
if string_list[-1] != added_str:
return chain_string + splitter + added_str
else:
return chain_string
def adv2(_df_train, _df_test, drop_cols):
df_train = _df_train.copy()
df_test = _df_test.copy()
print(len(df_train))
print(len(df_test))
df_train["isTest"] = 0
df_test["isTest"] = 1
drop_cols.append("isTest")
df = pd.concat([df_train, df_test])
#train 0, test 1
df_X = df.drop(columns= drop_cols)
df_y = df["isTest"]
columns=df_X.columns.to_list()
train, test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.33, random_state=42, shuffle=True)
del df, df_y, df_X
gc.collect()
train = lgb.Dataset(train, label=y_train)
test = lgb.Dataset(test, label=y_test)
param = {'num_leaves': 50,
'min_data_in_leaf': 30,
'objective':'binary',
'max_depth': 5,
'learning_rate': 0.05,
"min_child_samples": 20,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9 ,
"bagging_seed": 44,
"metric": 'auc',
"verbosity": -1,
'importance_type':'gain',
}
num_round = 1000
clf = lgb.train(param, train, num_round, valid_sets = [train, test], verbose_eval=50, early_stopping_rounds = 500)
feature_imp = pd.DataFrame(sorted(zip(clf.feature_importance(),columns)), columns=['Value','Feature'])
plt.figure(figsize=(20, 20))
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False).head(100))
plt.title('LightGBM Features')
plt.tight_layout()
plt.show()
plt.savefig(str(PATH_TO_GRAPH_DIR / 'lgbm_importances-01.png'))
def adversarialValidation(_df_train, _df_test, drop_cols, sample_flag=False):
df_train = _df_train.copy()
df_test = _df_test.copy()
if sample_flag:
num_test = len(df_test)
df_train = df_train.sample(n=num_test)
print(f"df_train : {len(df_train)}")
print(f"df_test : {len(df_test)}")
df_train["isTest"] = 0
df_test["isTest"] = 1
drop_cols.append("isTest")
df = pd.concat([df_train, df_test])
#train 0, test 1
df_X = df.drop(columns= drop_cols)
df_y = df["isTest"]
adv_params = {
'learning_rate': 0.05,
'n_jobs': -1,
'seed': 50,
'objective':'binary',
'boosting_type':'gbdt',
'is_unbalance': False,
'importance_type':'gain',
'metric': 'auc',
'verbose': 1,
}
model = lgb.LGBMClassifier(n_estimators=100)
model.set_params(**adv_params)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
score = cross_validate(model, df_X, df_y, cv=skf, return_estimator=True, scoring="roc_auc")
adv_acc = score['test_score'].mean()
print('Adv AUC:', score['test_score'].mean())
feature_imp = pd.DataFrame(sorted(zip(score['estimator'][0].feature_importances_,df_X.columns), reverse=True), columns=['Value','Feature'])
print(feature_imp)
#graphImportance(feature_imp, 50)
#base_name = '../data/features/adv_feature_imp_' + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
#feature_imp.to_csv(base_name + '.csv')
# f = open(base_name + '.pkl', 'wb')
# pickle.dump(feature_imp, f)
# f.close()
for i in range(len(feature_imp["Feature"])):
if feature_imp["Value"].values[i] > 0:
str_col = "\'" + feature_imp["Feature"].values[i] + "\',"
print(str_col)
return adv_acc, feature_imp
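# Usage sketch for adversarialValidation (illustration only; the synthetic frames below are
# assumptions, not competition data).
def TEST__adversarialValidation():
    rng = np.random.RandomState(0)
    df_tr = pd.DataFrame({"f1": rng.normal(0.0, 1.0, 300), "f2": rng.normal(0.0, 1.0, 300)})
    df_te = pd.DataFrame({"f1": rng.normal(0.7, 1.0, 300), "f2": rng.normal(0.0, 1.0, 300)})
    # an AUC well above 0.5 means train and test are distinguishable (here via the shifted f1)
    adv_acc, feature_imp = adversarialValidation(df_tr, df_te, drop_cols=[], sample_flag=False)
    print(adv_acc)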
def get_too_many_null_attr(data, rate=0.9):
# many_null_cols = []
# for col in data.columns:
# print(col)
# if data[col].isnull().sum() / data.shape[0] > 0.9:
# many_null_cols.append(col)
# print("DONE!!!!!")
many_null_cols = [col for col in data.columns if data[col].isnull().sum() / data.shape[0] > rate]
return many_null_cols
def get_too_many_repeated_val(data, rate=0.95):
big_top_value_cols = [col for col in data.columns if data[col].value_counts(dropna=False, normalize=True).values[0] > rate]
return big_top_value_cols
def get_useless_columns(data, null_rate=0.95, repeat_rate=0.95):
too_many_null = get_too_many_null_attr(data, null_rate)
print("More than {}% null: ".format(null_rate) + str(len(too_many_null)))
print(too_many_null)
too_many_repeated = get_too_many_repeated_val(data, repeat_rate)
print("More than {}% repeated value: ".format(repeat_rate) + str(len(too_many_repeated)))
print(too_many_repeated)
cols_to_drop = list(set(too_many_null + too_many_repeated))
return cols_to_drop
def get_useless_columnsTrainTest(df_train, df_test, null_rate=0.95, repeat_rate=0.95):
drop_train = set(get_useless_columns(df_train, null_rate=null_rate, repeat_rate=repeat_rate))
drop_test = set(get_useless_columns(df_test, null_rate=null_rate, repeat_rate=repeat_rate)) if not df_test.empty else set([])
s_symmetric_difference = drop_train ^ drop_test
if s_symmetric_difference:
print("{} are not included in each set".format(s_symmetric_difference))
cols_to_drop = list((drop_train) & (drop_test))
print("intersection cols_to_drop")
print(cols_to_drop)
return cols_to_drop
def transformCosCircle(df, time_col_str):
val = [float(x) for x in df[time_col_str].unique()]
val.sort()
#print(val)
num = len(val)
unit = 180.0 / num
#print(unit)
trans_val = [x * unit for x in val]
#print(trans_val)
df[time_col_str + "_angle_rad"] = np.deg2rad(df[time_col_str].replace(val, trans_val))
df[time_col_str + "_cos"] = np.cos(df[time_col_str + "_angle_rad"])
df[time_col_str + "_sin"] = np.sin(df[time_col_str + "_angle_rad"])
df = df.drop(columns=[time_col_str + "_angle_rad"])
#print(df[time_col_str])
return df
def extract_time_features(df, date_col):
df[date_col] = pd.to_datetime(df[date_col])
df['month'] = df[date_col].dt.month
df['day'] = df[date_col].dt.day
#df['hour'] = df[date_col].dt.hour
df['year'] = df[date_col].dt.year
#df["seconds"] = df[date_col].dt.second
df['dayofweek'] = df[date_col].dt.dayofweek #0:monday to 6: sunday
#df['week'] = df[date_col].dt.week # the week ordinal of the year
df['weekofyear'] = df[date_col].dt.weekofyear # the week ordinal of the year
df['dayofyear'] = df[date_col].dt.dayofyear #1-366
df['quarter'] = df[date_col].dt.quarter
df['is_month_start'] = df[date_col].dt.is_month_start
df['is_month_end'] = df[date_col].dt.is_month_end
df['is_quarter_start'] = df[date_col].dt.is_quarter_start
df['is_quarter_end'] = df[date_col].dt.is_quarter_end
df['is_year_start'] = df[date_col].dt.is_year_start
df['is_year_end'] = df[date_col].dt.is_year_end
df['is_leap_year'] = df[date_col].dt.is_leap_year
df['days_in_month'] = df[date_col].dt.daysinmonth  # use date_col rather than a hard-coded 'date' column
df["days_from_end_of_month"] = df['days_in_month'] - df["day"]
df["days_rate_in_month"] = (df["day"] -1) / (df['days_in_month'] - 1)
df["s_m_e_in_month"] = df["day"].map(lambda x: 0 if x <= 10 else (1 if x <= 20 else 2))
# df = transformCosCircle(df, "day")
# df = transformCosCircle(df, "month")
# df = transformCosCircle(df, "dayofweek")
# df = transformCosCircle(df, "weekofyear")
# df = transformCosCircle(df, "dayofyear")
return df
def pickle_dump(obj, path):
with open(path, mode='wb') as f:
pickle.dump(obj,f)
def pickle_load(path):
data = None
with open(path, mode='rb') as f:
data = pickle.load(f)
return data
def procLabelEncToColumns(df_train, df_test, col_list):
df_train["train_test_judge"] = "train"
df_test["train_test_judge"] = "test"
df = pd.concat([df_train, df_test])
for c in col_list:
df[c] = procLabelEncToSeries(df[c])
df_train = df.loc[df["train_test_judge"]=="train"].drop(columns=["train_test_judge"])
df_test = df.loc[df["train_test_judge"]=="test"].drop(columns=["train_test_judge"])
return df_train, df_test
def procLabelEncToSeries(se):
val_list = list(se.dropna().unique())
val_list.sort()
#print(df[f].unique())
replace_map = dict(zip(val_list, np.arange(len(val_list))))
se = se.map(replace_map)
#pdb.set_trace()
return se
def proclabelEncodings(df, not_proc_list=[]):
#lbl = preprocessing.LabelEncoder()
if not ON_KAGGLE:
print("**label encoding**")
decode_dict = {}
for f in df.columns:
if df[f].dtype.name =='object':
if f in not_proc_list:
continue
if not ON_KAGGLE:
print(f)
val_list = list(df[f].dropna().unique())
val_list.sort()
#print(df[f].unique())
replace_map = dict(zip(val_list, np.arange(len(val_list))))
df[f] = df[f].map(replace_map)
#print(df[f].unique())
inverse_dict = get_swap_dict(replace_map)
decode_dict[f] = inverse_dict
#lbl.fit(list(df[f].dropna().unique()))
#print(list(lbl.classes_))
#df[f] = lbl.transform(list(df[f].values))
if not ON_KAGGLE:
print("**label encoding end **\n")
print("**for dicode**")
#print(f"{decode_dict}")
return df, decode_dict
def qwk(act,pred,n=4,hist_range=(0,3), weights=None):
O = confusion_matrix(act,pred, labels=[0, 1, 2, 3],sample_weight = weights)
O = np.divide(O,np.sum(O))
W = np.zeros((n,n))
for i in range(n):
for j in range(n):
W[i][j] = ((i-j)**2)/((n-1)**2)
act_hist = np.histogram(act,bins=n,range=hist_range, weights=weights)[0]
prd_hist = np.histogram(pred,bins=n,range=hist_range, weights=weights)[0]
E = np.outer(act_hist,prd_hist)
E = np.divide(E,np.sum(E))
num = np.sum(np.multiply(W,O))
den = np.sum(np.multiply(W,E))
return 1-np.divide(num,den)
def calcClass(X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
else:
X_p[i] = 3
return X_p
class OptimizedRounder(object):
"""
An optimizer for rounding thresholds
to maximize Quadratic Weighted Kappa (QWK) score
"""
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
#print(coef)
"""
Get loss according to
using current coefficients
:param coef: A list of coefficients that will be used for rounding
:param X: The raw predictions
:param y: The ground truth labels
"""
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
#elif pred >= coef[2] and pred < coef[3]:
# X_p[i] = 3
else:
X_p[i] = 3
#ll = cohen_kappa_score(y, X_p, weights='quadratic')
ll = qwk(y, X_p)
#print(ll)
return -ll
def fit(self, X, y, initial_coef):
"""
Optimize rounding thresholds
:param X: The raw predictions
:param y: The ground truth labels
"""
loss_partial = partial(self._kappa_loss, X=X, y=y)
#initial_coef = th_list
self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
return self.coefficients()
def predict(self, X, coef):
"""
Make predictions with specified thresholds
:param X: The raw predictions
:param coef: A list of coefficients that will be used for rounding
"""
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
#elif pred >= coef[2] and pred < coef[3]:
# X_p[i] = 3
else:
X_p[i] = 3
return X_p
def coefficients(self):
"""
Return the optimized coefficients
"""
return self.coef_['x']
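# Usage sketch for OptimizedRounder (illustration only; the synthetic predictions below are
# assumptions, not model outputs from this pipeline).
def TEST__optimized_rounder():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 4, size=500)
    raw_pred = y_true + rng.normal(0.0, 0.35, size=500)  # noisy continuous predictions
    optR = OptimizedRounder()
    coef = optR.fit(raw_pred, y_true, initial_coef=[0.5, 1.5, 2.5])
    rounded = optR.predict(raw_pred, coef)
    print("thresholds:", coef, "QWK:", qwk(y_true, rounded))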
def calcDropColsFromPermuationImportance(path_to_dir):
ppath_to_dir = Path(path_to_dir)
df_total = pd.DataFrame()
for i, f in enumerate(ppath_to_dir.glob("permutation_feature_imp_*.csv")):
df_imp = | pd.read_csv(f, index_col=1) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from ber_public.deap import dim
@pytest.fixture
def building_fabric():
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = | pd.Series([1.5]) | pandas.Series |
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
begge_kjonn_5 = pd.read_csv("begge_kjonn_5.csv")
gutter_5 = pd.read_csv("gutter_5.csv")
jenter_5 = pd.read_csv("jenter_5.csv")
jenter_gutter_5 = pd.concat([gutter_5, jenter_5]).reset_index(drop=True)
begge_kjonn_8 = pd.read_csv("begge_kjonn_8.csv")
gutter_8 = pd.read_csv("gutter_8.csv")
jenter_8 = pd.read_csv("jenter_8.csv")
jenter_gutter_8 = pd.concat([gutter_8, jenter_8]).reset_index(drop=True)
# Save tables to excel file, for the heck of it
begge_kjonn_9 = pd.read_csv("begge_kjonn_9.csv")
gutter_9 = pd.read_csv("gutter_9.csv")
jenter_9 = pd.read_csv("jenter_9.csv")
jenter_gutter_9 = | pd.concat([gutter_9, jenter_9]) | pandas.concat |
import os
import glob
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm import tqdm_notebook as tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from constants import DATA_DIR
def csv_concatenate(folder_path, nested=False):
# Concatenate all csv files under a directory
if nested == True:
files = glob.glob(folder_path + "/*/*.csv")
else:
files = glob.glob(folder_path + "/*.csv")
df_list = []
for file in files:
df_list.append(pd.read_csv(file, parse_dates=True,
infer_datetime_format=True))
# Fill nan with 0s as some values are empty for percentage points
df = | pd.concat(df_list) | pandas.concat |
import pandas as pd
import pytest
from .. import sqftproforma as sqpf
from .. import developer
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_cost': [1000000, 2000000, 3000000],
'parcel_size': [10000, 20000, 30000],
'max_far': [2.0, 3.0, 4.0],
'max_height': [40, 60, 80]},
index=['a', 'b', 'c'])
def test_developer(simple_dev_inputs):
pf = sqpf.SqFtProForma()
out = pf.lookup("residential", simple_dev_inputs)
dev = developer.Developer({"residential": out})
target_units = 10
parcel_size = pd.Series([1000, 1000, 1000], index=['a', 'b', 'c'])
ave_unit_size = | pd.Series([650, 650, 650], index=['a', 'b', 'c']) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # IBM HR Employee Attrition & Performance.
# ## [Please star/upvote in case you find it helpful.]
# In[ ]:
from IPython.display import Image
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-logo.png")
# ## CONTENTS ::->
# [ **1 ) Exploratory Data Analysis**](#content1)
# [ **2) Correlation b/w Features**](#content2)
# [** 3) Feature Selection**](#content3)
# [** 4) Preparing Dataset**](#content4)
# [ **5) Modelling**](#content5)
#
# Note that this notebook uses traditional ML algorithms. I have another notebook in which I have used an ANN on the same dataset. To check it out please follow the below link-->
#
# https://www.kaggle.com/rajmehra03/an-introduction-to-ann-keras-with-ibm-hr-dataset/
# [ **6) Conclusions**](#content6)
# <a id="content1"></a>
# ## 1 ) Exploratory Data Analysis
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corressponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import SMOTE
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Reading the data from a CSV file
# In[ ]:
df=pd.read_csv(r"../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# In[ ]:
df.head()
# In[ ]:
df.shape
# In[ ]:
df.columns
# ## 1.3 ) Missing Values Treatment
# In[ ]:
df.info() # no null or 'Nan' values.
# In[ ]:
df.isnull().sum()
# In[ ]:
msno.matrix(df) # just to visualize. one final time.
# ## 1.4 ) The Features and the 'Target'
# In[ ]:
df.columns
# In[ ]:
df.head()
# In all we have 34 features consisting of both the categorical as well as the numerical features. The target variable is the
# 'Attrition' of the employee which can be either a Yes or a No. This is what we have to predict.
# **Hence this is a Binary Classification problem. **
# ## 1.5 ) Univariate Analysis
# In this section I have done the univariate analysis i.e. I have analysed the range or distribution of the values that various features take. To better analyze the results I have plotted various graphs and visualizations wherever necessary. Univariate analysis helps us identify the outliers in the data.
# In[ ]:
df.describe()
# Let us first analyze the various numeric features. To do this we can actually plot a boxplot showing all the numeric features. Also the distplot or a histogram is a reasonable choice in such cases.
# In[ ]:
sns.factorplot(data=df,kind='box',size=10,aspect=3)
# Note that all the features have pretty different scales and so plotting a boxplot is not a good idea. Instead what we can do is plot histograms of various continuously distributed features.
#
# We can also plot a kdeplot showing the distribution of the feature. Below I have plotted a kdeplot for the 'Age' feature.
# Similarly we plot for other numeric features also. We can also use a distplot from the seaborn library, which combines a histogram with a kernel density estimate in one plot.
# In[ ]:
sns.kdeplot(df['Age'],shade=True,color='#ff4125')
# In[ ]:
sns.distplot(df['Age'])
# Similarly we can do this for all the numerical features. Below I have plotted the subplots for the other features.
# In[ ]:
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
fig,ax = plt.subplots(5,2, figsize=(9,9))
sns.distplot(df['TotalWorkingYears'], ax = ax[0,0])
sns.distplot(df['MonthlyIncome'], ax = ax[0,1])
sns.distplot(df['YearsAtCompany'], ax = ax[1,0])
sns.distplot(df['DistanceFromHome'], ax = ax[1,1])
sns.distplot(df['YearsInCurrentRole'], ax = ax[2,0])
sns.distplot(df['YearsWithCurrManager'], ax = ax[2,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[3,0])
sns.distplot(df['PercentSalaryHike'], ax = ax[3,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[4,0])
sns.distplot(df['TrainingTimesLastYear'], ax = ax[4,1])
plt.tight_layout()
print()
# Let us now analyze the various categorical features. Note that in these cases the best way is to use a count plot to show the relative count of observations of different categories.
# In[ ]:
cat_df=df.select_dtypes(include='object')
# In[ ]:
cat_df.columns
# In[ ]:
def plot_cat(attr,labels=None):
if(attr=='JobRole'):
sns.factorplot(data=df,kind='count',size=5,aspect=3,x=attr)
return
sns.factorplot(data=df,kind='count',size=5,aspect=1.5,x=attr)
# I have made a function that accepts a string, namely the name of the column or attribute we want to analyze, and plots the countplot for that feature, which makes it easier to visualize.
# In[ ]:
plot_cat('Attrition')
# **Note that the number of observations belonging to the 'No' category is way greater than that belonging to the 'Yes' category. Hence we have skewed classes and this is a typical example of the 'Imbalanced Classification Problem'. To handle such problems we need over-sampling or under-sampling techniques. I shall come back to this point later (a quick sketch follows below).**
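# A toy illustration of what over-sampling with SMOTE looks like (the arrays below are assumptions, not the HR features; the real handling of the imbalance is done later in the notebook).
# In[ ]:
# requires imblearn >= 0.4 for fit_resample; X_toy / y_toy are purely illustrative
X_toy = np.random.rand(200, 4)
y_toy = np.array([0] * 180 + [1] * 20)            # roughly the same 'No' vs 'Yes' skew
X_res, y_res = SMOTE(random_state=42).fit_resample(X_toy, y_toy)
print(np.bincount(y_toy), np.bincount(y_res))     # the minority class is synthetically over-sampled to parity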
# **Let us now similarly analyze other categorical features.**
# In[ ]:
plot_cat('BusinessTravel')
# The above plot clearly shows that most of the people belong to the 'Travel_Rarely' class. This indicates that most of the people did not have a job which asked them for frequent travelling.
# In[ ]:
plot_cat('OverTime')
# In[ ]:
plot_cat('Department')
# In[ ]:
plot_cat('EducationField')
# In[ ]:
plot_cat('Gender')
# Note that males are present in higher number.
# In[ ]:
plot_cat('JobRole')
# ** Similarly we can continue for other categorical features. **
# **Note that the same function can also be used to better analyze the numeric discrete features like 'Education', 'JobSatisfaction' etc.**
# In[ ]:
# just uncomment the following cell.
# In[ ]:
# num_disc=['Education','EnvironmentSatisfaction','JobInvolvement','JobSatisfaction','WorkLifeBalance','RelationshipSatisfaction','PerformanceRating']
# for i in num_disc:
# plot_cat(i)
# similarly we can interpret these graphs.
# <a id="content2"></a>
# ## 2 ) Correlation b/w Features
#
# In[ ]:
#correlation matrix.
cor_mat= df.corr()
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig=plt.gcf()
fig.set_size_inches(30,12)
# the heatmap call itself seems to be missing here; a standard masked, annotated heatmap (assumed) restores the figure the notes below refer to
sns.heatmap(data=cor_mat, mask=mask, square=True, annot=True, cbar=True)
# ###### SOME INFERENCES FROM THE ABOVE HEATMAP
#
# 1. Self-relation, i.e. of a feature to itself, is equal to 1 as expected.
#
# 2. JobLevel is highly related to Age as expected as aged employees will generally tend to occupy higher positions in the company.
#
# 3. MonthlyIncome is very strongly related to JobLevel as expected, as senior employees will definitely earn more.
#
# 4. PerformanceRating is highly related to PercentSalaryHike which is quite obvious.
#
# 5. Also note that TotalWorkingYears is highly related to JobLevel which is expected as senior employees must have worked for a larger span of time.
#
# 6. YearsWithCurrManager is highly related to YearsAtCompany.
#
# 7. YearsAtCompany is related to YearsInCurrentRole.
#
#
# **Note that we could drop some highly correlated features as they add redundancy to the model, but since the correlation is generally quite low, let us keep all the features for now. In case of highly correlated features we could use something like Principal Component Analysis (PCA) to reduce our feature space, as sketched below.**
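# The sketch below is illustrative only (not part of the original analysis) and shows how PCA could compress the numeric feature space if the correlations were strong enough to warrant it.
# In[ ]:
from sklearn.decomposition import PCA  # local import; the notebook does not import PCA elsewhere
num_cols = df.select_dtypes(include=np.number).columns
pca_demo = PCA(n_components=10).fit(StandardScaler().fit_transform(df[num_cols]))
print(pca_demo.explained_variance_ratio_.cumsum())  # cumulative variance explained by the first 10 components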
# In[ ]:
df.columns
# <a id="content3"></a>
# ## 3 ) Feature Selection
#
# ## 3.1 ) Plotting the Features against the 'Target' variable.
# #### 3.1.1 ) Age
# Note that Age is a continuous quantity and therefore we can plot it against the Attrition using a boxplot.
# In[ ]:
sns.factorplot(data=df,y='Age',x='Attrition',size=5,aspect=1,kind='box')
# Note that the median as well as the maximum age of the people with 'No' attrition is higher than that of the 'Yes' category. This shows that people of higher age have a lower tendency to leave the organisation, which makes sense as they may have settled into the organisation.
# #### 3.1.2 ) Department
# Note that both Attrition (the target) and Department are categorical. In such cases a cross-tabulation is the most reasonable way to analyze the trends; it clearly shows the number of observations for each class, which makes the results easier to analyze.
# In[ ]:
df.Department.value_counts()
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='Department')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Department],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Note that most of the observations correspond to 'No', as we saw previously. About 81 % of the people in HR don't want to leave the organisation and only 19 % want to leave. Similar conclusions can be drawn for other departments too from the above cross-tabulation.
# #### 3.1.3 ) Gender
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Gender],margins=True,normalize='index') # set normalize=index to view rowwise %.
# About 85 % of females want to stay in the organisation while only 15 % want to leave. Overall, 83 % of employees want to stay in the organisation, with only 16 % wanting to leave the organisation or the company.
# #### 3.1.4 ) Job Level
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobLevel],margins=True,normalize='index') # set normalize=index to view rowwise %.
# People in Joblevel 4 have a very high percent for a 'No' and a low percent for a 'Yes'. Similar inferences can be made for other job levels.
# #### 3.1.5 ) Monthly Income
# In[ ]:
sns.factorplot(data=df,kind='bar',x='Attrition',y='MonthlyIncome')
# Note that the average income for the 'No' class is noticeably higher, which is expected as those earning well will certainly be less willing to exit the organisation. Similarly, employees who are probably not earning well will be more inclined to change the company.
# #### 3.1.6 ) Job Satisfaction
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='JobSatisfaction')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# This shows an interesting trend: for higher values of job satisfaction (i.e. the more a person is satisfied with their job), a smaller percentage say 'Yes', which is quite intuitive as highly contented workers will obviously not want to leave the organisation.
# #### 3.1.7 ) Environment Satisfaction
# In[ ]:
| pd.crosstab(columns=[df.Attrition],index=[df.EnvironmentSatisfaction],margins=True,normalize='index') | pandas.crosstab |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
        assert is_datetime64_dtype(ts)
from unittest import TestCase
import pandas as pd
from cbcvalidator.main import Validate, ValueOutOfRange, BadConfigurationError
class TestValidate(TestCase):
def test_validate(self):
v = Validate(verbose=True)
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'min_val': 2, 'max_val': 7, 'action': 'null'},
{'col': 'b', 'max_len': 5, 'action': 'trim'},
{'col': 'b', 'min_len': 2, 'action': 'null'}
]
df, msg = v.validate(df, val_dict)
test = pd.isnull(df.loc[0, 'a'])
self.assertTrue(test)
# Test zero value limit (zero's eval to False)
data = {'a': [-1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'min_val': 0, 'max_val': 7, 'action': 'null'},
{'col': 'b', 'max_len': 5, 'action': 'trim'},
{'col': 'b', 'min_len': 2, 'action': 'null'}
]
df, msg = v.validate(df, val_dict)
test = pd.isnull(df.loc[0, 'a'])
self.assertTrue(test)
test = len(df.loc[0, 'b'])
golden = 5
self.assertEqual(golden, test)
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'max_val': 7, 'action': 'null'},
{'col': 'a', 'min_val': 3, 'action': 'print'},
{'col': 'b', 'max_len': 5, 'action': 'print'},
{'col': 'b', 'min_len': 3, 'action': 'null'}
]
df, msg = v.validate(df, val_dict)
test = pd.isnull(df.loc[7, 'a'])
self.assertTrue(test)
test = pd.isnull(df.loc[2, 'b'])
self.assertTrue(test)
# Test value out of range raises
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'max_val': 7, 'action': 'raise'},
]
with self.assertRaises(ValueOutOfRange) as context:
df, msg = v.validate(df, val_dict)
# Test with no validation criteria matching.
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'max_val': 99, 'action': 'null'},
]
df, msg = v.validate(df, val_dict)
self.assertIsNone(msg)
# Check that fully empty series works.
data = {'a': [None, None, None, None, None, None, None, None]}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'max_val': 7, 'action': 'null'}
]
df, msg = v.validate(df, val_dict)
# So long as this doesn't raise an error it's fine.
# Test what happens when a numeric column is processed as a string. This should do nothing, but print a
# warning.
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'a', 'min_len': 2, 'max_len': 7, 'action': 'trim'}
]
df, msg = v.validate(df, val_dict)
test = df.loc[0, 'a']
self.assertEqual(1, test)
# Test for a missing column
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
df = pd.DataFrame(data)
val_dict = [
{'col': 'not_a_col_name', 'min_len': 2, 'max_len': 7, 'action': 'trim'}
]
df, msg = v.validate(df, val_dict)
test = df.loc[0, 'a']
self.assertEqual(1, test)
# Test value out of range raises
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
        df = pd.DataFrame(data)
import pandas as pd
def find_ms(df):
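    """Flag metabolic syndrome (MS) components for subjects with BMI >= 25.
    Rows with missing values in the required columns are dropped. For BMI >= 25,
    the flags triglycerides >= 150, glucose >= 100, blood pressure >= 130/85 and
    low HDL-C (< 40 for men, < 50 for women) are summed into an 'MS' score;
    subjects with BMI < 25 get an MS score of 0.
    """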
subset_index = df[['BMI', 'Systolic', 'Diastolic',
'Triglyceride', 'HDL-C', 'Glucose',
'Total Cholesterol', 'Gender']].dropna().index
    df = df.loc[subset_index]
df_bmi_lo = df.loc[df['BMI']<25.0,:]
df_bmi_hi = df.loc[df['BMI']>=25.0,:]
df_bmi_hi['TG-s'] = df_bmi_hi['Triglyceride']>=150.0
df_bmi_hi['Gluc-s'] = df_bmi_hi['Glucose'] >= 100.0
df_bmi_hi['Systolic-s'] = df_bmi_hi['Systolic']>=130.0
df_bmi_hi['Diastolic-s'] = df_bmi_hi['Diastolic'] >=85.0
df_bmi_hi['TG-s'] = df_bmi_hi['TG-s'].astype(int)
df_bmi_hi['Gluc-s'] = df_bmi_hi['Gluc-s'].astype(int)
df_bmi_hi['BP-s'] = df_bmi_hi['Systolic-s'] | df_bmi_hi['Diastolic-s']
df_bmi_hi['BP-s'] = df_bmi_hi['BP-s'].astype(int)
male_df_bmi_hi = df_bmi_hi[df_bmi_hi['Gender'] == 1]
female_df_bmi_hi = df_bmi_hi[df_bmi_hi['Gender'] == 2]
male_df_bmi_hi['HDL-C-s'] = male_df_bmi_hi['HDL-C'] < 40.0
female_df_bmi_hi['HDL-C-s'] = female_df_bmi_hi['HDL-C'] < 50.0
female_df_bmi_hi['HDL-C-s'] = female_df_bmi_hi['HDL-C-s'].astype(int)
male_df_bmi_hi['HDL-C-s'] = male_df_bmi_hi['HDL-C-s'].astype(int)
male_df_bmi_hi['MS'] = male_df_bmi_hi[['BP-s', 'Gluc-s',
'TG-s', 'HDL-C-s']].sum(axis=1)
female_df_bmi_hi['MS'] = female_df_bmi_hi[['BP-s', 'Gluc-s',
'TG-s', 'HDL-C-s']].sum(axis=1)
df_bmi_lo['MS'] = 0
return pd.concat([df_bmi_lo, male_df_bmi_hi, female_df_bmi_hi])
def set_kpi(df):
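    """Add boolean KPI flag columns based on fixed clinical cut-offs
    (BMI, blood pressure, glucose, creatinine, total cholesterol, HDL-C,
    LDL-C and hemoglobin). The input DataFrame is modified in place.
    """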
# BMI
df['BMI-kpi'] = df['BMI']>=30
# BP
df['Systolic-kpi'] = df['Systolic']>=140.0
df['Diastolic-kpi'] = df['Diastolic'] >=90.0
df['BP-kpi'] = df['Systolic-kpi'] | df['Diastolic-kpi']
# Glucose
df['Glucose-kpi'] = df['Glucose'] >= 125.0
# Creatinine
df['Creatinine-kpi'] = df['Creatinine'] >=1.5
# Total Cholesterol
df['Total Cholesterol-kpi-a'] = df['Total Cholesterol'] >= 200
df['Total Cholesterol-kpi-b'] = df['Total Cholesterol'] >= 240
# HDL-C
df['HDL-C-kpi'] = df['HDL-C'] <= 35
# LDL-C (direct)
df['LDL-C (Calculated)-kpi'] = df['LDL-C (Calculated)'] >=130
# Hemoglobin
df['CBC_Hemoglobin-kpi'] = df['CBC_Hemoglobin'] <= 10
def calculate_kpi_score(df):
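    """Compute, for each KPI flag set by set_kpi (plus the MS score from
    find_ms), the fraction of subjects exceeding the cut-off. Returns None
    when the DataFrame is empty.
    """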
if len(df) == 0:
return None
bmi_kpi = df['BMI-kpi']
bp_kpi = df['BP-kpi']
glucose_kpi = df['Glucose-kpi']
creat_kpi = df['Creatinine-kpi']
choles_kpi_a = df['Total Cholesterol-kpi-a']
choles_kpi_b = df['Total Cholesterol-kpi-b']
hdl_kpi = df['HDL-C-kpi']
ldl_kpi = df['LDL-C (Calculated)-kpi']
hgb_kpi = df['CBC_Hemoglobin-kpi']
ms_kpi = df['MS']>1
    bmi_kpi_percnt = len(df.loc[bmi_kpi,:])/float(len(df))
    bp_kpi_percnt = len(df.loc[bp_kpi,:])/float(len(df))
    glucose_kpi_percnt = len(df.loc[glucose_kpi,:])/float(len(df))
    creat_kpi_percnt = len(df.loc[creat_kpi,:])/float(len(df))
    choles_kpi_a_percnt = len(df.loc[choles_kpi_a,:])/float(len(df))
    choles_kpi_b_percnt = len(df.loc[choles_kpi_b,:])/float(len(df))
    hdl_kpi_percnt = len(df.loc[hdl_kpi,:])/float(len(df))
    ldl_kpi_percnt = len(df.loc[ldl_kpi,:])/float(len(df))
    hgb_kpi_percnt = len(df.loc[hgb_kpi,:])/float(len(df))
    ms_kpi_percnt = len(df.loc[ms_kpi,:])/float(len(df))
#print 'bmi', bmi_kpi_percnt
#print 'bp', bp_kpi_percnt
#print 'glucose', glucose_kpi_percnt
#print 'cretinine', creat_kpi_percnt
#print 'cholesterol a', choles_kpi_a_percnt
#print 'cholesterol b', choles_kpi_b_percnt
#print 'HDL', hdl_kpi_percnt
#print 'LDL', ldl_kpi_percnt
#print 'hemoglobin', hgb_kpi_percnt
#print 'metabolic syndrome', ms_kpi_percnt
    data = pd.Series()
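    # A plausible way to finish this function (hypothetical -- the key names
    # below are assumptions, not the author's code): collect the computed
    # percentages into the Series and return it.
    # data["BMI"] = bmi_kpi_percnt
    # data["BP"] = bp_kpi_percnt
    # data["Glucose"] = glucose_kpi_percnt
    # data["Creatinine"] = creat_kpi_percnt
    # data["Total Cholesterol >= 200"] = choles_kpi_a_percnt
    # data["Total Cholesterol >= 240"] = choles_kpi_b_percnt
    # data["HDL-C"] = hdl_kpi_percnt
    # data["LDL-C"] = ldl_kpi_percnt
    # data["Hemoglobin"] = hgb_kpi_percnt
    # data["MS"] = ms_kpi_percnt
    # return data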
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.fixture
def dataframe_with_duplicate_index():
"""Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
data = [['a', 'd', 'e', 'c', 'f', 'b'],
[1, 4, 5, 3, 6, 2],
[1, 4, 5, 3, 6, 2]]
index = ['h1', 'h3', 'h5']
columns = MultiIndex(
levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']],
codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
names=['main', 'sub'])
return DataFrame(data, index=index, columns=columns)
@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x],
lambda s, x: s.loc[:, x],
lambda s, x: s.xs(x, level=1)])
@pytest.mark.parametrize('level1_value, expected', [
(0, Series([1], index=[0])),
(1, Series([2, 3], index=[1, 2]))
])
def test_series_getitem_multiindex(access_method, level1_value, expected):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = access_method(s, level1_value)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('level0_value', ['D', 'A'])
def test_getitem_duplicates_multiindex(level0_value):
# GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[[level0_value, 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
# confirm indexing on missing value raises KeyError
if level0_value != 'A':
msg = "'A'"
with pytest.raises(KeyError, match=msg):
df.val['A']
msg = "'X'"
with pytest.raises(KeyError, match=msg):
df.val['X']
result = df.val[level0_value]
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
([], False, None), # empty ok
(['A'], False, None),
(['A', 'D'], False, None),
(['D'], False, r"\['D'\] not in index"), # not any values found
(pd.IndexSlice[:, ['foo']], True, None),
(pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
expected_error):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
if indexer == []:
expected = s.iloc[[]]
elif is_level1:
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
else:
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
if expected_error is not None:
with pytest.raises(KeyError, match=expected_error):
s.loc[indexer]
else:
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
([], slice(None)),
(['foo'], [])
])
def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
expected = np.mean
assert result == expected
def test_getitem_simple(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data.T
expected = df.values[:, 0]
result = df['foo', 'one'].values
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('indexer,msg', [
(lambda df: df[('foo', 'four')], r"\('foo', 'four'\)"),
(lambda df: df['foobar'], "'foobar'")
])
def test_getitem_simple_key_error(
multiindex_dataframe_random_data, indexer, msg):
df = multiindex_dataframe_random_data.T
with pytest.raises(KeyError, match=msg):
indexer(df)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3],
lambda s: s.loc[2000, 3]
])
def test_series_getitem(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
result = indexer(s)
    tm.assert_series_equal(result, expected)
# =============================================================================
# MACHINE LEARNING ALGORITHM FOR GAS STATIONS IN SPAIN
# =============================================================================
"""
Process:
Input:
- /home/tfm/Documentos/TFM/Datasets/Gasolineras/Gasolineras_de_España.csv
Output:
- /home/tfm/Documentos/TFM/Datasets/Gasolineras/gas_stations_categorical.txt
- /home/tfm/Documentos/TFM/Datasets/Gasolineras/gasolineras_filt_Espana.csv
- /home/tfm/Documentos/TFM/Datasets/Gasolineras/gasolineras_reduced_Espana.csv
"""
# Load the required libraries
import sys
import datetime
import time
import math
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
plt.style.use("ggplot")
from matplotlib.pyplot import figure
matplotlib.rcParams["figure.figsize"] = (12,8)
import unidecode
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
# 1.- Function definitions ----------------------------------------
#------------------------------------------------------------------
def remove_accents(a):
"""
    Definition of the function remove_accents:
        Function to remove accents from a string
    Parameters
    ----------
    a: string
        String containing the word to be cleaned
    Returns
    ------
    unidecode.unidecode(a): string
        String containing the word without accents
    Example
    -------
    >>> df[column] = df[column].apply(remove_accents)
"""
return unidecode.unidecode(a)
def plot_multiple_histograms(df, cols):
"""
    Definition of the procedure plot_multiple_histograms:
        Procedure to plot histograms of numeric variables
    Parameters
    ----------
    df: Pandas Dataframe
        Dataframe containing the numeric variables
    cols: list[string]
        List of strings containing the names of the columns of
        the numeric variables
    Returns
    ------
    There is no return value as such; the plotting commands are
    executed inside the procedure
    Example
    -------
    >>> plot_multiple_histograms(df_numeric, ["X","Y","FID","objectid",
    "codigo_po","longitud","latitud","precio_g_1","precio_g_2"])
"""
num_plots = len(cols)
num_cols = math.ceil(np.sqrt(num_plots))
num_rows = math.ceil(num_plots/num_cols)
fig, axs = plt.subplots(num_rows, num_cols)
for ind, col in enumerate(cols):
i = math.floor(ind/num_cols)
j = ind - i*num_cols
if num_rows == 1:
if num_cols == 1:
sns.histplot(df[col], kde=True, ax=axs)
else:
sns.histplot(df[col], kde=True, ax=axs[j])
else:
sns.histplot(df[col], kde=True, ax=axs[i, j])
def plot_multiple_countplots(df, cols):
"""
    Definition of the procedure plot_multiple_countplots:
        Procedure to plot count plots of numeric variables
    Parameters
    ----------
    df: Pandas Dataframe
        Dataframe containing the numeric variables
    cols: list[string]
        List of strings containing the names of the columns of
        the numeric variables
    Returns
    ------
    There is no return value as such; the plotting commands are
    executed inside the procedure
    Example
    -------
    >>> plot_multiple_countplots(df_numeric, ["X","Y","FID","objectid",
    "codigo_po","longitud","latitud","precio_g_1","precio_g_2"])
"""
num_plots = len(cols)
num_cols = math.ceil(np.sqrt(num_plots))
num_rows = math.ceil(num_plots/num_cols)
fig, axs = plt.subplots(num_rows, num_cols)
for ind, col in enumerate(cols):
i = math.floor(ind/num_cols)
j = ind - i*num_cols
if num_rows == 1:
if num_cols == 1:
sns.countplot(x=df[col], ax=axs)
else:
sns.countplot(x=df[col], ax=axs[j])
else:
sns.countplot(x=df[col], ax=axs[i, j])
def exploratory_data_analysis(df):
"""
    Definition of the function exploratory_data_analysis:
        Performs the Exploratory Data Analysis on the
        Gas Stations of Spain dataset
    Parameters
    ----------
    df: Pandas Dataframe
        Dataframe containing the Gas Stations of Spain dataset
    Returns
    ------
    df_filt: Pandas Dataframe
        Dataframe containing the filtered Gas Stations of Spain
        dataset
    Example
    -------
    >>> df_filt = exploratory_data_analysis(df)
"""
df.head()
df.info()
    # Remove accents and other special characters
for column in ["provincia","municipio","localidad","direccion","rotulo"]:
df[column] = df[column].apply(remove_accents)
    # Numeric variables
df_numeric = df.select_dtypes(include="number")
df_numeric.info()
plot_multiple_histograms(df_numeric, ["X","Y","FID","objectid","codigo_po","longitud","latitud","precio_g_1","precio_g_2"])
plt.show()
time.sleep(15)
plt.close("all")
comparison_column_1 = np.where(df["longitud"] == df["X"], True, False)
if np.all(comparison_column_1):
print("Longitud igual a X")
else:
print("Longitud NO es igual a X")
comparison_column_2 = np.where(df["latitud"] == df["Y"], True, False)
if np.all(comparison_column_2):
print("Latitud igual a Y")
else:
print("Latitud NO es igual a Y")
df.drop("X",axis="columns", inplace=True)
df.drop("Y",axis="columns", inplace=True)
    # Filter coordinates to peninsular Spain
df_filt = df.query("longitud > -10 & latitud > 34 & provincia != 'BALEARS (ILLES)' & provincia != 'MELILLA' & provincia != 'CEUTA'")
print(df_filt["provincia"].unique())
sns.displot(df_filt["longitud"], bins=50, kde=True, rug=True)
sns.displot(df_filt["latitud"], bins=50, kde=True, rug=True)
df_filt.info()
print(df_filt.shape)
plot_multiple_histograms(df_filt, ["latitud","longitud"])
plt.show()
time.sleep(30)
plt.close("all")
    # Categorical variables
df_non_numeric = df_filt.select_dtypes(exclude="number")
df_non_numeric.info()
plt.figure(figsize=(25,7))
sns.countplot(x="provincia",
data=df_non_numeric)
plt.show()
time.sleep(15)
plt.close("all")
with open("/home/tfm/Documentos/TFM/Datasets/Gasolineras/gas_stations_categorical.txt", "w") as f:
for column in df_non_numeric:
print(df_non_numeric[column].value_counts(), file=f)
df_filt.drop(df.filter(regex="precio").columns, axis="columns", inplace=True)
df_filt.drop("horario00",axis="columns", inplace=True)
df_filt.drop("z",axis="columns", inplace=True)
df_filt.drop("fecha",axis="columns", inplace=True)
df_filt.drop(df.filter(regex="f_").columns, axis="columns", inplace=True)
df_filt.drop("objectid",axis="columns", inplace=True)
df_filt.drop("FID",axis="columns", inplace=True)
df_filt.drop("margen",axis="columns", inplace=True)
df_filt.drop("rotulo",axis="columns", inplace=True)
df_filt.drop("tipo_venta",axis="columns", inplace=True)
df_filt.drop("rem_",axis="columns", inplace=True)
df_filt.drop("horario",axis="columns", inplace=True)
df_filt.info()
return df_filt
def get_centermost_point(cluster):
"""
    Definition of the function get_centermost_point:
        Finds the point of the cluster closest to the
        cluster centroid
    Parameters
    ----------
    cluster: numpy.ndarray
        Array with the coordinates of the points in the cluster
    Returns
    ------
    tuple(centermost_point): tuple
        Tuple with the coordinates of the point closest
        to the cluster centroid
    Example
    -------
    >>> centermost_points = clusters.map(get_centermost_point)
"""
centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
return tuple(centermost_point)
def clustering_dbscan(df_filt):
"""
    Definition of the function clustering_dbscan:
        Performs DBSCAN clustering to reduce the size of the
        input Dataframe in a geographically uniform way
    Parameters
    ----------
    df_filt: Pandas Dataframe
        Dataframe containing the dataset to be clustered
    Returns
    ------
    rs: Pandas Dataframe
        Dataframe containing the reduced dataset
    Example
    -------
    >>> rs = clustering_dbscan(df_filt)
"""
coords = df_filt[["latitud", "longitud"]].values
kms_per_radian = 6371.0088
epsilon = 5 / kms_per_radian
db = DBSCAN(eps=epsilon, min_samples=4, algorithm="ball_tree", metric="haversine").fit(np.radians(coords))
cluster_labels = db.labels_
num_clusters = len(set(cluster_labels))
clusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters-1)])
print("Number of clusters: {}".format(num_clusters))
centermost_points = clusters.map(get_centermost_point)
lats, lons = zip(*centermost_points)
    rep_points = pd.DataFrame({"longitud":lons, "latitud":lats})
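    # Hypothetical sketch of the remaining step (an assumption, not the author's
    # exact code): map each representative point back to its full original row,
    # so that rs keeps every column, and return it as the docstring describes.
    # rs = rep_points.apply(
    #     lambda row: df_filt[(df_filt["latitud"] == row["latitud"]) &
    #                         (df_filt["longitud"] == row["longitud"])].iloc[0],
    #     axis=1)
    # return rs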
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
                        Timestamp('2011-08-01 10:00', tz='US/Eastern')], name='idx')
import os
import jsonlines
import numpy as np
import pandas as pd
import requests
import tagme
import ujson
from tqdm import tqdm
from bootleg.symbols.entity_profile import EntityProfile
pd.options.display.max_colwidth = 500
def load_train_data(train_file, title_map, entity_profile=None):
"""Loads a jsonl file and creates a pandas DataFrame. Adds candidates, types, and KGs if available."""
num_lines = sum(1 for _ in open(train_file))
rows = []
with jsonlines.open(train_file) as f:
for line in tqdm(f, total=num_lines):
gold_qids = line["qids"]
# for each alias, append a row in the merged result table
for alias_idx in range(len(gold_qids)):
res = {
"sentence": line["sentence"],
"sent_idx": line["sent_idx_unq"],
"aliases": line["aliases"],
"span": line["spans"][alias_idx],
"slices": line.get("slices", {}),
"alias": line["aliases"][alias_idx],
"alias_idx": alias_idx,
"is_gold_label": line["gold"][alias_idx],
"gold_qid": gold_qids[alias_idx],
"gold_title": title_map[gold_qids[alias_idx]]
if gold_qids[alias_idx] != "Q-1"
else "Q-1",
"all_gold_qids": gold_qids,
"gold_label_aliases": [
al
for i, al in enumerate(line["aliases"])
if line["gold"][i] is True
],
"all_is_gold_labels": line["gold"],
"all_spans": line["spans"],
}
slices = []
if "slices" in line:
for sl_name in line["slices"]:
if (
str(alias_idx) in line["slices"][sl_name]
and line["slices"][sl_name][str(alias_idx)] > 0.5
):
slices.append(sl_name)
res["slices"] = slices
if entity_profile is not None:
res["cand_names"] = [
title_map[q[0]]
for i, q in enumerate(
entity_profile.get_qid_count_cands(
line["aliases"][alias_idx]
)
)
]
res["cand_qids"] = [
q[0]
for i, q in enumerate(
entity_profile.get_qid_count_cands(
line["aliases"][alias_idx]
)
)
]
for type_sym in entity_profile.get_all_typesystems():
gold_types = entity_profile.get_types(
gold_qids[alias_idx], type_sym
)
res[f"{type_sym}_gld"] = gold_types
connected_pairs_gld = []
for alias_idx2 in range(len(gold_qids)):
if entity_profile.is_connected(
gold_qids[alias_idx], gold_qids[alias_idx2]
):
connected_pairs_gld.append(gold_qids[alias_idx2])
res[f"kg_gld"] = connected_pairs_gld
rows.append(res)
return pd.DataFrame(rows)
def load_title_map(entity_dir, entity_mapping_dir="entity_mappings"):
return ujson.load(
open(os.path.join(entity_dir, entity_mapping_dir, "qid2title.json"))
)
def load_cand_map(entity_dir, alias_map_file, entity_mapping_dir="entity_mappings"):
return ujson.load(
open(os.path.join(entity_dir, entity_mapping_dir, alias_map_file))
)
def load_predictions(file):
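    """Load a dump_preds jsonl file into a dict keyed by sent_idx_unq."""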
lines = {}
with jsonlines.open(file) as f:
for line in f:
lines[line["sent_idx_unq"]] = line
return lines
def score_predictions(
orig_file, pred_file, title_map, entity_profile: EntityProfile = None
):
"""Loads a jsonl file and joins with the results from dump_preds"""
num_lines = sum(1 for line in open(orig_file))
preds = load_predictions(pred_file)
correct = 0
total = 0
rows = []
with jsonlines.open(orig_file) as f:
for line in tqdm(f, total=num_lines):
sent_idx = line["sent_idx_unq"]
gold_qids = line["qids"]
pred_qids = preds[sent_idx]["qids"]
assert len(gold_qids) == len(
pred_qids
), "Gold and pred QIDs have different lengths"
correct += np.sum(
[
gold_qid == pred_qid
for gold_qid, pred_qid in zip(gold_qids, pred_qids)
]
)
total += len(gold_qids)
# for each alias, append a row in the merged result table
for alias_idx in range(len(gold_qids)):
res = {
"sentence": line["sentence"],
"sent_idx": line["sent_idx_unq"],
"aliases": line["aliases"],
"span": line["spans"][alias_idx],
"slices": line.get("slices", {}),
"alias": line["aliases"][alias_idx],
"alias_idx": alias_idx,
"is_gold_label": line["gold"][alias_idx],
"gold_qid": gold_qids[alias_idx],
"pred_qid": pred_qids[alias_idx],
"gold_title": title_map[gold_qids[alias_idx]]
if gold_qids[alias_idx] != "Q-1"
else "Q-1",
"pred_title": title_map.get(pred_qids[alias_idx], "CouldnotFind")
if pred_qids[alias_idx] != "NC"
else "NC",
"all_gold_qids": gold_qids,
"all_pred_qids": pred_qids,
"gold_label_aliases": [
al
for i, al in enumerate(line["aliases"])
if line["gold"][i] is True
],
"all_is_gold_labels": line["gold"],
"all_spans": line["spans"],
}
slices = []
if "slices" in line:
for sl_name in line["slices"]:
if (
str(alias_idx) in line["slices"][sl_name]
and line["slices"][sl_name][str(alias_idx)] > 0.5
):
slices.append(sl_name)
res["slices"] = slices
if entity_profile is not None:
res["cands"] = [
tuple(
[
title_map[q[0]],
preds[sent_idx]["cand_probs"][alias_idx][i],
]
)
for i, q in enumerate(
entity_profile.get_qid_count_cands(
line["aliases"][alias_idx]
)
)
]
for type_sym in entity_profile.get_all_typesystems():
gold_types = entity_profile.get_types(
gold_qids[alias_idx], type_sym
)
pred_types = entity_profile.get_types(
pred_qids[alias_idx], type_sym
)
res[f"{type_sym}_gld"] = gold_types
res[f"{type_sym}_pred"] = pred_types
connected_pairs_gld = []
connected_pairs_pred = []
for alias_idx2 in range(len(gold_qids)):
if entity_profile.is_connected(
gold_qids[alias_idx], gold_qids[alias_idx2]
):
connected_pairs_gld.append(gold_qids[alias_idx2])
if entity_profile.is_connected(
pred_qids[alias_idx], pred_qids[alias_idx2]
):
connected_pairs_pred.append(pred_qids[alias_idx2])
res[f"kg_gld"] = connected_pairs_gld
res[f"kg_pred"] = connected_pairs_pred
rows.append(res)
return pd.DataFrame(rows)
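# Usage sketch (illustrative only; file paths are hypothetical):
#     title_map = load_title_map("bootleg_data/entity_db")
#     results_df = score_predictions("bootleg_data/dev.jsonl", "bootleg_logs/dev_preds.jsonl", title_map)
#     per_alias_accuracy = (results_df["gold_qid"] == results_df["pred_qid"]).mean()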
def load_mentions(file):
lines = []
with jsonlines.open(file) as f:
for line in f:
new_line = {
"sentence": line["sentence"],
"aliases": line.get("aliases", []),
"spans": line.get("spans", []),
}
lines.append(new_line)
return | pd.DataFrame(lines) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import apply_if_callable, is_bool_indexer
import pandas._libs.lib as lib
from pandas.core.dtypes.common import (
is_dict_like,
is_list_like,
is_scalar,
)
import sys
import warnings
from .base import BasePandasDataset
from .iterator import PartitionIterator
from .utils import _inherit_docstrings
from .utils import from_pandas, to_pandas
if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
# Python >= 3.7
from re import Pattern as _pattern_type
else:
# Python <= 3.6
from re import _pattern_type
@_inherit_docstrings(pandas.Series, excluded=[pandas.Series, pandas.Series.__init__])
class Series(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
dtype=None,
name=None,
copy=False,
fastpath=False,
query_compiler=None,
):
"""Constructor for a Series object.
Args:
series_oids ([ObjectID]): The list of remote Series objects.
"""
if isinstance(data, type(self)):
query_compiler = data._query_compiler
if query_compiler is None:
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if name is None:
name = "__reduced__"
query_compiler = from_pandas(
pandas.DataFrame(
pandas.Series(
data=data,
index=index,
dtype=dtype,
name=name,
copy=copy,
fastpath=fastpath,
)
)
)._query_compiler
if len(query_compiler.columns) != 1 or (
len(query_compiler.index) == 1 and query_compiler.index[0] == "__reduced__"
):
query_compiler = query_compiler.transpose()
self._query_compiler = query_compiler
def _get_name(self):
name = self._query_compiler.columns[0]
if name == "__reduced__":
return None
return name
def _set_name(self, name):
if name is None:
name = "__reduced__"
self._query_compiler.columns = [name]
name = property(_get_name, _set_name)
_parent = None
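    # How the unnamed-Series convention works (illustrative note, based on the property above):
    # a Series is backed by a one-column query compiler, and when the Series has no name that
    # column is labeled "__reduced__"; _get_name/_set_name translate between the two forms:
    #     s = Series([1, 2, 3])   # backing column label: "__reduced__"
    #     s.name                  # -> None
    #     s.name = "a"            # backing column label becomes "a"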
def _reduce_dimension(self, query_compiler):
return query_compiler.to_pandas().squeeze()
def _validate_dtypes_sum_prod_mean(self, axis, numeric_only, ignore_axis=False):
return self
def _validate_dtypes_min_max(self, axis, numeric_only):
return self
def _validate_dtypes(self, numeric_only=False):
pass
def _create_or_update_from_compiler(self, new_query_compiler, inplace=False):
"""Returns or updates a DataFrame given new query_compiler"""
assert (
isinstance(new_query_compiler, type(self._query_compiler))
or type(new_query_compiler) in self._query_compiler.__class__.__bases__
), "Invalid Query Compiler object: {}".format(type(new_query_compiler))
if not inplace and (
len(new_query_compiler.columns) == 1 or len(new_query_compiler.index) == 1
):
return Series(query_compiler=new_query_compiler)
elif not inplace:
# This can happen with things like `reset_index` where we can add columns.
from .dataframe import DataFrame
return DataFrame(query_compiler=new_query_compiler)
else:
self._update_inplace(new_query_compiler=new_query_compiler)
def _prepare_inter_op(self, other):
if isinstance(other, Series):
new_self = self.copy()
new_self.name = "__reduced__"
new_other = other.copy()
new_other.name = "__reduced__"
else:
new_self = self
new_other = other
return new_self, new_other
def __add__(self, right):
return self.add(right)
def __radd__(self, left):
return self.add(left)
def __and__(self, other):
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__and__(new_other)
def __array__(self, dtype=None):
return super(Series, self).__array__(dtype).flatten()
@property
def __array_priority__(self): # pragma: no cover
return self._to_pandas().__array_priority__
def __bytes__(self):
return self._default_to_pandas(pandas.Series.__bytes__)
def __contains__(self, key):
return key in self.index
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
return self.copy(deep=True)
def __delitem__(self, key):
if key not in self.keys():
raise KeyError(key)
self.drop(labels=key, inplace=True)
def __div__(self, right):
return self.div(right)
def __rdiv__(self, left):
return self.rdiv(left)
def __divmod__(self, right):
return self.divmod(right)
def __rdivmod__(self, left):
return self.rdivmod(left)
def __float__(self):
return float(self.squeeze())
def __floordiv__(self, right):
return self.floordiv(right)
def __rfloordiv__(self, right):
return self.rfloordiv(right)
def _getitem(self, key):
key = apply_if_callable(key, self)
        if isinstance(key, Series) and key.dtype == np.bool_:
# This ends up being significantly faster than looping through and getting
# each item individually.
key = key._to_pandas()
if is_bool_indexer(key):
return self.__constructor__(
query_compiler=self._query_compiler.getitem_row_array(
pandas.RangeIndex(len(self.index))[key]
)
)
# TODO: More efficiently handle `tuple` case for `Series.__getitem__`
if isinstance(key, tuple):
return self._default_to_pandas(pandas.Series.__getitem__, key)
else:
if not is_list_like(key):
reduce_dimension = True
key = [key]
else:
reduce_dimension = False
# The check for whether or not `key` is in `keys()` will throw a TypeError
# if the object is not hashable. When that happens, we just use the `iloc`.
try:
if all(k in self.keys() for k in key):
result = self._query_compiler.getitem_row_array(
self.index.get_indexer_for(key)
)
else:
result = self._query_compiler.getitem_row_array(key)
except TypeError:
result = self._query_compiler.getitem_row_array(key)
if reduce_dimension:
return self._reduce_dimension(result)
return self.__constructor__(query_compiler=result)
def __getattr__(self, key):
"""After regular attribute access, looks up the name in the index
Args:
key (str): Attribute name.
Returns:
The value of the attribute.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key in self.index:
return self[key]
raise e
def __int__(self):
return int(self.squeeze())
def __iter__(self):
return self._to_pandas().__iter__()
def __mod__(self, right):
return self.mod(right)
def __rmod__(self, left):
return self.rmod(left)
def __mul__(self, right):
return self.mul(right)
def __rmul__(self, left):
return self.rmul(left)
def __or__(self, other):
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__or__(new_other)
def __pow__(self, right):
return self.pow(right)
def __rpow__(self, left):
return self.rpow(left)
def __repr__(self):
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame):
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(temp_str.rsplit("dtype: ", 1)[-1])
if len(self) == 0:
return "Series([], {}{}".format(name_str, dtype_str)
return temp_str.rsplit("\nName:", 1)[0] + "\n{}{}{}".format(
name_str, len_str, dtype_str
)
def __round__(self, decimals=0):
return self._create_or_update_from_compiler(
self._query_compiler.round(decimals=decimals)
)
def __setitem__(self, key, value):
if key not in self.keys():
raise KeyError(key)
self._create_or_update_from_compiler(
self._query_compiler.setitem(1, key, value), inplace=True
)
def __sub__(self, right):
return self.sub(right)
def __rsub__(self, left):
return self.rsub(left)
def __truediv__(self, right):
return self.truediv(right)
def __rtruediv__(self, left):
return self.rtruediv(left)
__iadd__ = __add__
    __imul__ = __mul__
__ipow__ = __pow__
__isub__ = __sub__
__itruediv__ = __truediv__
@property
def values(self):
"""Create a numpy array with the values from this Series.
Returns:
The numpy representation of this object.
"""
return super(Series, self).to_numpy().flatten()
def __xor__(self, other):
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).__xor__(new_other)
def add(self, other, level=None, fill_value=None, axis=0):
new_self, new_other = self._prepare_inter_op(other)
return super(Series, new_self).add(
new_other, level=level, fill_value=fill_value, axis=axis
)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new Series containing the new column names.
"""
return Series(query_compiler=self._query_compiler.add_prefix(prefix, axis=0))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return Series(query_compiler=self._query_compiler.add_suffix(suffix, axis=0))
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""Append another DataFrame/list/Series to this one.
Args:
to_append: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
            A new Series with the concatenated values (or a DataFrame when a DataFrame is appended).
"""
from .dataframe import DataFrame
bad_type_msg = (
'cannot concatenate object of type "{}"; only pd.Series, '
"pd.DataFrame, and pd.Panel (deprecated) objs are valid"
)
if isinstance(to_append, list):
if not all(isinstance(o, BasePandasDataset) for o in to_append):
raise TypeError(
bad_type_msg.format(
type(
next(
o
for o in to_append
if not isinstance(o, BasePandasDataset)
)
)
)
)
elif all(isinstance(o, Series) for o in to_append):
self.name = None
for i in range(len(to_append)):
to_append[i].name = None
to_append[i] = to_append[i]._query_compiler
else:
# Matching pandas behavior of naming the Series columns 0
self.name = 0
for i in range(len(to_append)):
if isinstance(to_append[i], Series):
to_append[i].name = 0
to_append[i] = DataFrame(to_append[i])
return DataFrame(self.copy()).append(
to_append,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
)
elif isinstance(to_append, Series):
self.name = None
to_append.name = None
to_append = [to_append._query_compiler]
elif isinstance(to_append, DataFrame):
self.name = 0
return DataFrame(self.copy()).append(
to_append, ignore_index=ignore_index, verify_integrity=verify_integrity
)
else:
raise TypeError(bad_type_msg.format(type(to_append)))
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(to_append.index)
if not isinstance(to_append, list)
else self.index.append([o.index for o in to_append])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, to_append, ignore_index=ignore_index, sort=None
)
if len(query_compiler.columns) > 1:
return DataFrame(query_compiler=query_compiler)
else:
return Series(query_compiler=query_compiler)
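    # Usage sketch for append (illustrative, not part of the library source):
    #     s = Series([1, 2])
    #     s.append(Series([3, 4]), ignore_index=True)   # -> Series([1, 2, 3, 4])
    #     s.append([Series([3]), Series([4])])          # a list of Series also yields a Series
    #     s.append(DataFrame({"a": [5]}))               # appending a DataFrame yields a DataFrame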
def apply(self, func, convert_dtype=True, args=(), **kwds):
# apply and aggregate have slightly different behaviors, so we have to use
# each one separately to determine the correct return type. In the case of
# `agg`, the axis is set, but it is not required for the computation, so we use
# it to determine which function to run.
if kwds.pop("axis", None) is not None:
apply_func = "agg"
else:
apply_func = "apply"
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `self.__name__` for the return
# type.
# Because a `Series` cannot be empty in pandas, we create a "dummy" `Series` to
# do the error checking and determining the return type.
try:
return_type = type(
getattr( | pandas.Series([""], index=self.index[:1]) | pandas.Series |
"""
pip install mysql-connector-python
pip install pandas
pip install numpy
"""
# libs
from db_works import db_connect, db_tables
import pandas as pd
import numpy as np
import pandas_ta as pta  # https://github.com/twopirllc/pandas-ta
import talib as ta # install from whl file < https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
import json
db_schema_name, db_table_name, db_settings_table_name = db_tables()
cursor, cnxn = db_connect()
TACTICS_PACK_SIZE = 50000
# todo: no need to use all params; just use download_settings_id
# todo: the combinations table could be stored in another schema
def get_combination():
cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange FROM " + db_schema_name + ".vw_tactics_tests_to_analyse where tactic_status_id = 0 limit 1")
download_setting = cursor.fetchall()
if len(download_setting) > 0:
download_settings_id = download_setting[0][0]
market = download_setting[0][1]
tick_interval = download_setting[0][2]
data_granulation = download_setting[0][3]
stock_type = download_setting[0][4]
stock_exchange = download_setting[0][5]
print("select done")
else:
print("no data to download")
exit()
return download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange
download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange = get_combination()
print(download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange)
# download OHLC data from DWH
def get_ohlc_data():
cursor.execute("SELECT * FROM " + db_schema_name + ".vw_binance_klines_anl where market = '"+market+"' and "
"tick_interval = '" + tick_interval + "' and "
"data_granulation = '"+ data_granulation + "' and "
"stock_type = '" + stock_type + "' and "
"stock_exchange = '" + stock_exchange + "' ")
df = pd.DataFrame(cursor.fetchall())
    df_bak = df.copy()  # a real copy is absolutely needed; plain assignment would only alias the same DataFrame
print("data ready")
return df, df_bak
df, df_bak = get_ohlc_data()
print(df)
def get_tactics_to_check():
cursor.execute("SELECT tactic_id, download_settings_id, test_stake, buy_indicator_1_name, buy_indicator_1_value, yield_expected, wait_periods "
"FROM " + db_schema_name + ".vw_tactics_tests_to_analyse where tactic_status_id = 0 and download_settings_id = "+ str(download_settings_id) + " limit " + str(TACTICS_PACK_SIZE) +" ")
tactics_data = cursor.fetchall()
return tactics_data
tactics_data = get_tactics_to_check()
def get_test_result(test_stake_in, test_indicator_buy_1_in, test_indicator_value_1_in, test_yield_expect_in, test_wait_periods_in):
df.columns =["open_time",
"open",
"high",
"low",
"close",
"volume",
"close_time",
"quote_asset_volume",
"number_of_trades",
"taker_buy_base_asset_volume",
"taker_buy_quote_asset_volume",
"ignore",
"market",
"tick_interval",
"data_granulation",
"stock_type",
"stock_exchange",
"insert_timestamp",
"open_datetime",
"close_datetime"]
# basics
df["open_time_dt"] = pd.to_datetime(df["open_datetime"], unit='ms')
df["open_time_yr"] = df["open_time_dt"].dt.year
df["open_time_mnt"] = df["open_time_dt"].dt.month
df["open_time_dy"] = df["open_time_dt"].dt.day
df["change_val"] = df.close - df.open
df["change_perc"] = df.close / df.open - 1
df["amplitude_val"] = df.high - df.low
df["amplitude_perc"] = df.high - df.low / df.open
df["up_down"] = np.where(df["close"] - df["close"].shift(1) > 0, 1, -1)
    # token: trend up/down (1 / -1)
    # definition: the rolling sum of changes over a custom period is positive (up) or negative (down).
    # you can combine it with ADX (trend strength) by multiplying both, e.g. -1 * 40
df["token_change_7"] = df["change_val"].rolling(7).sum()
df["token_trend_7"] = np.where(df["token_change_7"] > 0, 1, -1)
df["token_change_14"] = df["change_val"].rolling(14).sum() # oryginal
df["token_trend_14"] = np.where(df["token_change_14"] > 0, 1, -1)
df["token_change_50"] = df["change_val"].rolling(50).sum()
df["token_trend_50"] = np.where(df["token_change_50"] > 0, 1, -1)
df["token_change_100"] = df["change_val"].rolling(100).sum()
df["token_trend_100"] = np.where(df["token_change_100"] > 0, 1, -1)
# indicators
# Moving averages
df["sma_7"] = pta.sma(df["close"], length=7)
df["sma_25"] = pta.sma(df["close"], length=25)
df["sma_99"] = pta.sma(df["close"], length=99)
df["wma_7"] = pta.wma(df["close"], length=7)
df["wma_25"] = pta.wma(df["close"], length=25)
df["wma_99"] = pta.wma(df["close"], length=99)
df["ema_7"] = pta.ema(df["close"], length=7)
df["ema_25"] = pta.ema(df["close"], length=25)
df["ema_99"] = pta.ema(df["close"], length=99)
# MACD's
# oscilators
# RSI
df["rsi_6"] = ta.RSI(df["close"], timeperiod=6) # tradingview corr, ok, checked
df["rsi_10"] = ta.RSI(df["close"], timeperiod=10) # tradingview corr, ok, checked
df["rsi_12"] = ta.RSI(df["close"], timeperiod=12) # tradingview corr, ok, checked
df["rsi_14"] = ta.RSI(df["close"], timeperiod=14) # tradingview corr, ok, checked
df["rsi_20"] = ta.RSI(df["close"], timeperiod=20) # tradingview corr, ok, checked
df["rsi_24"] = ta.RSI(df["close"], timeperiod=24) # tradingview corr, ok, checked
# Williams %R
df["will_perc_r_10"] = pta.willr(df["high"], df["low"], df["close"], 10) # tradingview corr
df["will_perc_r_14"] = pta.willr(df["high"], df["low"], df["close"], 14) # tradingview corr
# CCI
df["cci_14"] = pta.willr(df["high"], df["low"], df["close"], 14) # tradingview corr ------------------------------this isn't cci, < willr function
# ROC - rate of change
df["roc_5"] = pta.roc(df["close"], 5) # trandingview, ok, checked
df["roc_6"] = pta.roc(df["close"], 6) # trandingview, ok, checked
df["roc_9"] = pta.roc(df["close"], 9) # trandingview, ok, checked
df["roc_10"] = pta.roc(df["close"], 10) # trandingview typical, ok, checked
df["roc_12"] = pta.roc(df["close"], 12) # trandingview typical, ok, checked
df["roc_14"] = pta.roc(df["close"], 14) # trandingview typical, ok, checked
df["roc_24"] = pta.roc(df["close"], 24) # trandingview typical, ok, checked
# MFI - money flow index
df["mfi_7"] = pta.mfi(df["high"], df["low"], df["close"], df["volume"]) # trandingview, ok, checked
df["mfi_14"] = pta.mfi(df["high"], df["low"], df["close"], df["volume"]) # trandingview, ok, checked
# stoch - stochastic oscilator
df["stoch_slowk_5_3"], df["stoch_slowd_5_3"] = ta.STOCH(df["high"], df["low"], df["close"]) # ta-lib standard
df["stoch_fastk_5_3"], df["stoch_fastd_5_3"] = ta.STOCHF(df["high"], df["low"], df["close"]) # ta-lib standard
df["stoch_slowk_14_3"], df["stoch_slowd_14_3"] = ta.STOCH(df["high"], df["low"], df["close"])
df["stoch_fastk_14_3"], df["stoch_fastd_14_3"] = ta.STOCHF(df["high"], df["low"], df["close"], fastk_period=14) # tradingview ok, checked
# StochRSI
#df["stoch_rsi"] = pta.sto
# ADX Average directional movement index
df["adx_7"] = ta.ADX(df["high"], df["low"], df["close"], timeperiod=7)
df["adx_14"] = ta.ADX(df["high"], df["low"], df["close"]) # standard
df["adx_50"] = ta.ADX(df["high"], df["low"], df["close"], timeperiod=50)
df["adx_100"] = ta.ADX(df["high"], df["low"], df["close"], timeperiod=100)
df["adxr_14"] = ta.ADXR(df["high"], df["low"], df["close"]) # standard
# token mod: Trend strength index
df["token_tsi_14"] = df["token_trend_14"] * df["adx_14"]
# volume indicators
# OBV - On Balance Volume
df["obv"] = pta.obv(df["close"], df["volume"])
# candles
df["cdl_doji"] = pta.cdl_doji(df["open"], df["high"], df["low"], df["close"])
df["cdl_dragonfly_doji"] = ta.CDLDRAGONFLYDOJI(df["open"], df["high"], df["low"], df["close"])
df["cdl_hammer"] = ta.CDLHAMMER(df["open"], df["high"], df["low"], df["close"])
df["cdl_marubozu"] = ta.CDLMARUBOZU(df["open"], df["high"], df["low"], df["close"])
df["cdl_longline"] = ta.CDLLONGLINE(df["open"], df["high"], df["low"], df["close"])
df["cdl_longlinedoji"] = ta.CDLLONGLEGGEDDOJI(df["open"], df["high"], df["low"], df["close"])
df["cdl_takuri"] = ta.CDLTAKURI(df["open"], df["high"], df["low"], df["close"])
df["cdl_3white_soldiers"] = ta.CDL3WHITESOLDIERS(df["open"], df["high"], df["low"], df["close"])
# combined token
# RSI rise 1 period
#df["rsi_6_rise_1_period"] = np.where(df["rsi_6"] > df["rsi_6"].shift(1), 1, 0)
# MFI raise 1 period
#df["mfi_7_rise_1_period"] = np.where(df["mfi_7"] > df["mfi_7"].shift(1), 1, 0)
# volume twice, two green after red
df["volume_twice_two_green_after_red"] = np.where((df["up_down"] == 1)
& (df["up_down"].shift(1) == 1)
& (df["up_down"].shift(2) == 0)
& (df["volume"] / df["volume"].shift(1) >= 1.2), 1, 0)
# TESTS strategies
# TESTS strategies
# TESTS strategies
# print(df)
test_stake = int(test_stake_in)
test_indicator_buy_1 = test_indicator_buy_1_in
#test_indicator_buy_2 = "token_trend_50"
#test_indicator_buy_3 = "token_trend_100"
#test_indicator_buy_4 = "adx_7"
test_indicator_value_1 = test_indicator_value_1_in
#test_indicator_value_2 = 1
#test_indicator_value_3 = 1
#test_indicator_value_4 = 40
test_yield_expect = test_yield_expect_in # ie. 0.01=1%
test_wait_periods = test_wait_periods_in # ie. try to sell in next 6 periods (or 10)
test_stoploss = -0.05 # must be minus
test_stock_fee = -0.0015 # must be minus
df["tst_is_buy_signal"] = np.where((df[test_indicator_buy_1] < test_indicator_value_1)
# & (df[test_indicator_buy_2] < test_indicator_value_2)
# & (df[test_indicator_buy_3] < test_indicator_value_3)
# & (df[test_indicator_buy_4] > test_indicator_value_4)
, 1, 0)
df["tst_sell_price"] = df["close"] * test_yield_expect + df["close"]
df["tst_sell_stoploss_price"] = df["close"] + df["close"] * test_stoploss # must be plus
df["tst_high_in_sell_period"] = df["high"].rolling(test_wait_periods).max().shift(-test_wait_periods)
df["tst_low_in_sell_period"] = df["low"].rolling(test_wait_periods).min().shift(-test_wait_periods)
df["tst_sell_after_yield"] = np.where(df['tst_high_in_sell_period'] >= df["tst_sell_price"], 1, 0)
df["tst_sell_after_stoploss"] = np.where(df['tst_low_in_sell_period'] <= df["tst_sell_stoploss_price"], 1, 0)
df["tst_sold_price"] = np.where(df['tst_sell_after_yield'] == 1, df["tst_sell_price"], df["close"].shift(-1 * test_wait_periods)) # market after time
df["tst_sold_diff_perc"] = df["tst_sold_price"] / df["close"]
df["tst_single_game_result"] = np.where(df['tst_sold_diff_perc'] > 1, 1, -1)
df["tst_buy_sell_fee"] = test_stake * test_stock_fee # todo: change later, but accuracy is good
df["tst_single_game_earn"] = test_stake * df["tst_sold_diff_perc"] - test_stake
df["tst_single_game_earn_minus_fees"] = (test_stake * df["tst_sold_diff_perc"] - test_stake) + df["tst_buy_sell_fee"]
# todo: single game result with stoploss. Need improvement
df["tst_single_game_earn_minus_fees_with_stoploss"] = np.where(df['tst_sell_after_stoploss'] == 1 , test_stake * test_stoploss + df["tst_buy_sell_fee"], df["tst_single_game_earn_minus_fees"])
    # test_name = f"tst_{market}_{tick_interval}_{test_indicator_buy_1}"
# print(df.info(verbose=True))
# last check
#print(df)
# df2 aggr
df2 = df[df["tst_is_buy_signal"] == 1].groupby(["open_time_yr", "open_time_mnt"]).\
aggregate({"tst_is_buy_signal": "sum",
#"tst_single_game_earn": "sum",
"tst_single_game_earn_minus_fees": "sum"
#"tst_single_game_earn_minus_fees_with_stoploss": "sum"
})
# print(df2)
df3 = df[df["tst_is_buy_signal"] == 1].groupby(["open_time_yr"]).\
aggregate({"tst_is_buy_signal": "sum",
#"tst_single_game_earn": "sum",
"tst_single_game_earn_minus_fees": "sum"
#"tst_single_game_earn_minus_fees_with_stoploss": "sum"
})
# print(df3)
# statistics
df4 = df[df["tst_is_buy_signal"] == 1].aggregate({"tst_is_buy_signal": "sum",
#"tst_single_game_earn": "sum",
"tst_single_game_earn_minus_fees": "sum"
#"tst_single_game_earn_minus_fees_with_stoploss": "sum"
})
# print(df4)
# jsons with results
result_string_1 = pd.DataFrame.to_json(df2)
result_string_2 = pd.DataFrame.to_json(df3)
result_string_3 = | pd.DataFrame.to_json(df4) | pandas.DataFrame.to_json |
import importlib
import inspect
import os
import warnings
from unittest.mock import patch
import cloudpickle
import numpy as np
import pandas as pd
import pytest
from skopt.space import Categorical
from evalml.exceptions import (
ComponentNotYetFittedError,
EnsembleMissingPipelinesError,
MethodPropertyNotFoundError,
)
from evalml.model_family import ModelFamily
from evalml.pipelines import BinaryClassificationPipeline
from evalml.pipelines.components import (
LSA,
PCA,
ARIMARegressor,
BaselineClassifier,
BaselineRegressor,
CatBoostClassifier,
CatBoostRegressor,
ComponentBase,
DateTimeFeaturizer,
DFSTransformer,
DropColumns,
DropNullColumns,
DropRowsTransformer,
ElasticNetClassifier,
ElasticNetRegressor,
Estimator,
ExtraTreesClassifier,
ExtraTreesRegressor,
Imputer,
LightGBMClassifier,
LightGBMRegressor,
LinearDiscriminantAnalysis,
LinearRegressor,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
Oversampler,
PerColumnImputer,
PolynomialDetrender,
ProphetRegressor,
RandomForestClassifier,
RandomForestRegressor,
RFClassifierSelectFromModel,
RFRegressorSelectFromModel,
SelectByType,
SelectColumns,
SimpleImputer,
StandardScaler,
SVMClassifier,
SVMRegressor,
TargetImputer,
TimeSeriesBaselineEstimator,
TimeSeriesFeaturizer,
Transformer,
Undersampler,
XGBoostClassifier,
XGBoostRegressor,
)
from evalml.pipelines.components.ensemble import (
StackedEnsembleBase,
StackedEnsembleClassifier,
StackedEnsembleRegressor,
)
from evalml.pipelines.components.estimators.classifiers.vowpal_wabbit_classifiers import (
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
)
from evalml.pipelines.components.estimators.regressors.vowpal_wabbit_regressor import (
VowpalWabbitRegressor,
)
from evalml.pipelines.components.transformers.encoders.label_encoder import (
LabelEncoder,
)
from evalml.pipelines.components.transformers.preprocessing.log_transformer import (
LogTransformer,
)
from evalml.pipelines.components.transformers.samplers.base_sampler import (
BaseSampler,
)
from evalml.pipelines.components.utils import (
_all_estimators,
_all_transformers,
all_components,
generate_component_code,
)
from evalml.problem_types import ProblemTypes
@pytest.fixture(scope="module")
def test_classes():
class MockComponent(ComponentBase):
name = "Mock Component"
modifies_features = True
modifies_target = False
training_only = False
class MockEstimator(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
class MockTransformer(Transformer):
name = "Mock Transformer"
def transform(self, X, y=None):
return X
return MockComponent, MockEstimator, MockTransformer
@pytest.fixture(scope="module")
def test_estimator_needs_fitting_false():
class MockEstimatorNeedsFittingFalse(Estimator):
name = "Mock Estimator Needs Fitting False"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
needs_fitting = False
def predict(self, X):
pass
return MockEstimatorNeedsFittingFalse
class MockFitComponent(ComponentBase):
name = "Mock Fit Component"
modifies_features = True
modifies_target = False
training_only = False
def __init__(self, param_a=2, param_b=10, random_seed=0):
parameters = {"param_a": param_a, "param_b": param_b}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
def fit(self, X, y=None):
pass
def predict(self, X):
return np.array(
[self.parameters["param_a"] * 2, self.parameters["param_b"] * 10]
)
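# Illustration of the mock above (not an assertion used by the tests): with the default
# parameters param_a=2 and param_b=10, MockFitComponent().predict(X) returns
# np.array([2 * 2, 10 * 10]) == np.array([4, 100]); the clone tests below rely only on
# predict() being a deterministic function of these parameters.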
def test_init(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
assert MockComponent().name == "Mock Component"
assert MockEstimator().name == "Mock Estimator"
assert MockTransformer().name == "Mock Transformer"
def test_describe(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
params = {"param_a": "value_a", "param_b": 123}
component = MockComponent(parameters=params)
assert component.describe(return_dict=True) == {
"name": "Mock Component",
"parameters": params,
}
estimator = MockEstimator(parameters=params)
assert estimator.describe(return_dict=True) == {
"name": "Mock Estimator",
"parameters": params,
}
transformer = MockTransformer(parameters=params)
assert transformer.describe(return_dict=True) == {
"name": "Mock Transformer",
"parameters": params,
}
def test_describe_component():
enc = OneHotEncoder()
imputer = Imputer()
simple_imputer = SimpleImputer("mean")
column_imputer = PerColumnImputer({"a": "mean", "b": ("constant", 100)})
scaler = StandardScaler()
feature_selection_clf = RFClassifierSelectFromModel(
n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf
)
feature_selection_reg = RFRegressorSelectFromModel(
n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf
)
drop_col_transformer = DropColumns(columns=["col_one", "col_two"])
drop_null_transformer = DropNullColumns()
datetime = DateTimeFeaturizer()
natural_language_featurizer = NaturalLanguageFeaturizer()
lsa = LSA()
pca = PCA()
lda = LinearDiscriminantAnalysis()
ft = DFSTransformer()
us = Undersampler()
assert enc.describe(return_dict=True) == {
"name": "One Hot Encoder",
"parameters": {
"top_n": 10,
"features_to_encode": None,
"categories": None,
"drop": "if_binary",
"handle_unknown": "ignore",
"handle_missing": "error",
},
}
assert imputer.describe(return_dict=True) == {
"name": "Imputer",
"parameters": {
"categorical_impute_strategy": "most_frequent",
"categorical_fill_value": None,
"numeric_impute_strategy": "mean",
"numeric_fill_value": None,
},
}
assert simple_imputer.describe(return_dict=True) == {
"name": "Simple Imputer",
"parameters": {"impute_strategy": "mean", "fill_value": None},
}
assert column_imputer.describe(return_dict=True) == {
"name": "Per Column Imputer",
"parameters": {
"impute_strategies": {"a": "mean", "b": ("constant", 100)},
"default_impute_strategy": "most_frequent",
},
}
assert scaler.describe(return_dict=True) == {
"name": "Standard Scaler",
"parameters": {},
}
assert feature_selection_clf.describe(return_dict=True) == {
"name": "RF Classifier Select From Model",
"parameters": {
"number_features": 5,
"n_estimators": 10,
"max_depth": None,
"percent_features": 0.3,
"threshold": -np.inf,
"n_jobs": -1,
},
}
assert feature_selection_reg.describe(return_dict=True) == {
"name": "RF Regressor Select From Model",
"parameters": {
"number_features": 5,
"n_estimators": 10,
"max_depth": None,
"percent_features": 0.3,
"threshold": -np.inf,
"n_jobs": -1,
},
}
assert drop_col_transformer.describe(return_dict=True) == {
"name": "Drop Columns Transformer",
"parameters": {"columns": ["col_one", "col_two"]},
}
assert drop_null_transformer.describe(return_dict=True) == {
"name": "Drop Null Columns Transformer",
"parameters": {"pct_null_threshold": 1.0},
}
assert datetime.describe(return_dict=True) == {
"name": "DateTime Featurization Component",
"parameters": {
"features_to_extract": ["year", "month", "day_of_week", "hour"],
"encode_as_categories": False,
"date_index": None,
},
}
assert natural_language_featurizer.describe(return_dict=True) == {
"name": "Natural Language Featurization Component",
"parameters": {},
}
assert lsa.describe(return_dict=True) == {
"name": "LSA Transformer",
"parameters": {},
}
assert pca.describe(return_dict=True) == {
"name": "PCA Transformer",
"parameters": {"n_components": None, "variance": 0.95},
}
assert lda.describe(return_dict=True) == {
"name": "Linear Discriminant Analysis Transformer",
"parameters": {"n_components": None},
}
assert ft.describe(return_dict=True) == {
"name": "DFS Transformer",
"parameters": {"index": "index"},
}
assert us.describe(return_dict=True) == {
"name": "Undersampler",
"parameters": {
"sampling_ratio": 0.25,
"sampling_ratio_dict": None,
"min_samples": 100,
"min_percentage": 0.1,
},
}
try:
oversampler = Oversampler()
assert oversampler.describe(return_dict=True) == {
"name": "Oversampler",
"parameters": {
"sampling_ratio": 0.25,
"sampling_ratio_dict": None,
"k_neighbors_default": 5,
"n_jobs": -1,
},
}
except ImportError:
pass
# testing estimators
base_classifier = BaselineClassifier()
base_regressor = BaselineRegressor()
lr_classifier = LogisticRegressionClassifier()
en_classifier = ElasticNetClassifier()
en_regressor = ElasticNetRegressor()
et_classifier = ExtraTreesClassifier(n_estimators=10, max_features="auto")
et_regressor = ExtraTreesRegressor(n_estimators=10, max_features="auto")
rf_classifier = RandomForestClassifier(n_estimators=10, max_depth=3)
rf_regressor = RandomForestRegressor(n_estimators=10, max_depth=3)
linear_regressor = LinearRegressor()
svm_classifier = SVMClassifier()
svm_regressor = SVMRegressor()
assert base_classifier.describe(return_dict=True) == {
"name": "Baseline Classifier",
"parameters": {"strategy": "mode"},
}
assert base_regressor.describe(return_dict=True) == {
"name": "Baseline Regressor",
"parameters": {"strategy": "mean"},
}
assert lr_classifier.describe(return_dict=True) == {
"name": "Logistic Regression Classifier",
"parameters": {
"penalty": "l2",
"C": 1.0,
"n_jobs": -1,
"multi_class": "auto",
"solver": "lbfgs",
},
}
assert en_classifier.describe(return_dict=True) == {
"name": "Elastic Net Classifier",
"parameters": {
"C": 1.0,
"l1_ratio": 0.15,
"n_jobs": -1,
"multi_class": "auto",
"solver": "saga",
"penalty": "elasticnet",
},
}
assert en_regressor.describe(return_dict=True) == {
"name": "Elastic Net Regressor",
"parameters": {
"alpha": 0.0001,
"l1_ratio": 0.15,
"max_iter": 1000,
"normalize": False,
},
}
assert et_classifier.describe(return_dict=True) == {
"name": "Extra Trees Classifier",
"parameters": {
"n_estimators": 10,
"max_features": "auto",
"max_depth": 6,
"min_samples_split": 2,
"min_weight_fraction_leaf": 0.0,
"n_jobs": -1,
},
}
assert et_regressor.describe(return_dict=True) == {
"name": "Extra Trees Regressor",
"parameters": {
"n_estimators": 10,
"max_features": "auto",
"max_depth": 6,
"min_samples_split": 2,
"min_weight_fraction_leaf": 0.0,
"n_jobs": -1,
},
}
assert rf_classifier.describe(return_dict=True) == {
"name": "Random Forest Classifier",
"parameters": {"n_estimators": 10, "max_depth": 3, "n_jobs": -1},
}
assert rf_regressor.describe(return_dict=True) == {
"name": "Random Forest Regressor",
"parameters": {"n_estimators": 10, "max_depth": 3, "n_jobs": -1},
}
assert linear_regressor.describe(return_dict=True) == {
"name": "Linear Regressor",
"parameters": {"fit_intercept": True, "normalize": False, "n_jobs": -1},
}
assert svm_classifier.describe(return_dict=True) == {
"name": "SVM Classifier",
"parameters": {
"C": 1.0,
"kernel": "rbf",
"gamma": "auto",
"probability": True,
},
}
assert svm_regressor.describe(return_dict=True) == {
"name": "SVM Regressor",
"parameters": {"C": 1.0, "kernel": "rbf", "gamma": "auto"},
}
try:
xgb_classifier = XGBoostClassifier(
eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75
)
xgb_regressor = XGBoostRegressor(
eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75
)
assert xgb_classifier.describe(return_dict=True) == {
"name": "XGBoost Classifier",
"parameters": {
"eta": 0.1,
"max_depth": 3,
"min_child_weight": 1,
"n_estimators": 75,
"n_jobs": 12,
"eval_metric": "logloss",
},
}
assert xgb_regressor.describe(return_dict=True) == {
"name": "XGBoost Regressor",
"parameters": {
"eta": 0.1,
"max_depth": 3,
"min_child_weight": 1,
"n_estimators": 75,
"n_jobs": 12,
},
}
except ImportError:
pass
try:
cb_classifier = CatBoostClassifier()
cb_regressor = CatBoostRegressor()
assert cb_classifier.describe(return_dict=True) == {
"name": "CatBoost Classifier",
"parameters": {
"allow_writing_files": False,
"n_estimators": 10,
"eta": 0.03,
"max_depth": 6,
"bootstrap_type": None,
"silent": True,
"n_jobs": -1,
},
}
assert cb_regressor.describe(return_dict=True) == {
"name": "CatBoost Regressor",
"parameters": {
"allow_writing_files": False,
"n_estimators": 10,
"eta": 0.03,
"max_depth": 6,
"bootstrap_type": None,
"silent": False,
"n_jobs": -1,
},
}
except ImportError:
pass
try:
lg_classifier = LightGBMClassifier()
lg_regressor = LightGBMRegressor()
assert lg_classifier.describe(return_dict=True) == {
"name": "LightGBM Classifier",
"parameters": {
"boosting_type": "gbdt",
"learning_rate": 0.1,
"n_estimators": 100,
"max_depth": 0,
"num_leaves": 31,
"min_child_samples": 20,
"n_jobs": -1,
"bagging_fraction": 0.9,
"bagging_freq": 0,
},
}
assert lg_regressor.describe(return_dict=True) == {
"name": "LightGBM Regressor",
"parameters": {
"boosting_type": "gbdt",
"learning_rate": 0.1,
"n_estimators": 20,
"max_depth": 0,
"num_leaves": 31,
"min_child_samples": 20,
"n_jobs": -1,
"bagging_fraction": 0.9,
"bagging_freq": 0,
},
}
except ImportError:
pass
try:
prophet_regressor = ProphetRegressor()
assert prophet_regressor.describe(return_dict=True) == {
"name": "Prophet Regressor",
"parameters": {
"changepoint_prior_scale": 0.05,
"date_index": None,
"holidays_prior_scale": 10,
"seasonality_mode": "additive",
"seasonality_prior_scale": 10,
"stan_backend": "CMDSTANPY",
},
}
except ImportError:
pass
try:
vw_binary_classifier = VowpalWabbitBinaryClassifier(
loss_function="classic",
learning_rate=0.1,
decay_learning_rate=1.0,
power_t=0.1,
passes=1,
)
vw_multi_classifier = VowpalWabbitMulticlassClassifier(
loss_function="classic",
learning_rate=0.1,
decay_learning_rate=1.0,
power_t=0.1,
passes=1,
)
vw_regressor = VowpalWabbitRegressor(
learning_rate=0.1, decay_learning_rate=1.0, power_t=0.1, passes=1
)
assert vw_binary_classifier.describe(return_dict=True) == {
"name": "Vowpal Wabbit Binary Classifier",
"parameters": {
"loss_function": "classic",
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
assert vw_multi_classifier.describe(return_dict=True) == {
"name": "Vowpal Wabbit Multiclass Classifier",
"parameters": {
"loss_function": "classic",
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
assert vw_regressor.describe(return_dict=True) == {
"name": "Vowpal Wabbit Regressor",
"parameters": {
"learning_rate": 0.1,
"decay_learning_rate": 1.0,
"power_t": 0.1,
"passes": 1,
},
}
except ImportError:
pass
def test_missing_attributes(X_y_binary):
class MockComponentName(ComponentBase):
pass
with pytest.raises(TypeError):
MockComponentName()
class MockComponentModelFamily(ComponentBase):
name = "Mock Component"
with pytest.raises(TypeError):
MockComponentModelFamily()
class MockEstimatorWithoutAttribute(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
with pytest.raises(TypeError):
MockEstimatorWithoutAttribute()
def test_missing_methods_on_components(X_y_binary, test_classes):
X, y = X_y_binary
MockComponent, MockEstimator, MockTransformer = test_classes
component = MockComponent()
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
component.fit(X)
estimator = MockEstimator()
estimator._is_fitted = True
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a predict method or a component_obj that implements predict",
):
estimator.predict(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a predict_proba method or a component_obj that implements predict_proba",
):
estimator.predict_proba(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Estimator requires a feature_importance property or a component_obj that implements feature_importances_",
):
estimator.feature_importance
transformer = MockTransformer()
transformer._is_fitted = True
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
transformer.fit(X, y)
transformer.transform(X)
with pytest.raises(
MethodPropertyNotFoundError,
match="Component requires a fit method or a component_obj that implements fit",
):
transformer.fit_transform(X)
def test_component_fit(X_y_binary):
X, y = X_y_binary
class MockEstimator:
def fit(self, X, y):
pass
class MockComponent(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
hyperparameter_ranges = {}
def __init__(self):
parameters = {}
est = MockEstimator()
super().__init__(parameters=parameters, component_obj=est, random_seed=0)
est = MockComponent()
assert isinstance(est.fit(X, y), ComponentBase)
def test_component_fit_transform(X_y_binary):
X, y = X_y_binary
class MockTransformerWithFitTransform(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit_transform(self, X, y=None):
return X
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
class MockTransformerWithFitTransformButError(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit_transform(self, X, y=None):
raise RuntimeError
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
class MockTransformerWithFitAndTransform(Transformer):
name = "Mock Transformer"
hyperparameter_ranges = {}
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
def __init__(self):
parameters = {}
super().__init__(parameters=parameters, component_obj=None, random_seed=0)
# convert data to pd DataFrame, because the component classes don't
# standardize to pd DataFrame
X = pd.DataFrame(X)
y = pd.Series(y)
component = MockTransformerWithFitTransform()
assert isinstance(component.fit_transform(X, y), pd.DataFrame)
component = MockTransformerWithFitTransformButError()
with pytest.raises(RuntimeError):
component.fit_transform(X, y)
component = MockTransformerWithFitAndTransform()
assert isinstance(component.fit_transform(X, y), pd.DataFrame)
def test_model_family_components(test_classes):
_, MockEstimator, _ = test_classes
assert MockEstimator.model_family == ModelFamily.LINEAR_MODEL
def test_regressor_call_predict_proba(test_classes):
X = np.array([])
_, MockEstimator, _ = test_classes
component = MockEstimator()
component._is_fitted = True
with pytest.raises(MethodPropertyNotFoundError):
component.predict_proba(X)
def test_component_describe(test_classes, caplog):
MockComponent, _, _ = test_classes
component = MockComponent()
component.describe(print_name=True)
out = caplog.text
assert "Mock Component" in out
def test_component_parameters_getter(test_classes):
MockComponent, _, _ = test_classes
component = MockComponent({"test": "parameter"})
assert component.parameters == {"test": "parameter"}
component.parameters["test"] = "new"
assert component.parameters == {"test": "parameter"}
def test_component_parameters_init(
logistic_regression_binary_pipeline_class, linear_regression_pipeline_class
):
for component_class in all_components():
print("Testing component {}".format(component_class.name))
component = component_class()
parameters = component.parameters
component2 = component_class(**parameters)
parameters2 = component2.parameters
assert parameters == parameters2
def test_clone_init():
params = {"param_a": 2, "param_b": 11}
clf = MockFitComponent(**params)
clf_clone = clf.clone()
assert clf.parameters == clf_clone.parameters
assert clf_clone.random_seed == clf.random_seed
def test_clone_fitted(X_y_binary):
X, y = X_y_binary
params = {"param_a": 3, "param_b": 7}
clf = MockFitComponent(**params)
clf.fit(X, y)
predicted = clf.predict(X)
clf_clone = clf.clone()
assert clf_clone.random_seed == clf.random_seed
assert clf.parameters == clf_clone.parameters
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
clf_clone.predict(X)
clf_clone.fit(X, y)
predicted_clone = clf_clone.predict(X)
np.testing.assert_almost_equal(predicted, predicted_clone)
def test_components_init_kwargs():
for component_class in all_components():
try:
component = component_class()
except EnsembleMissingPipelinesError:
continue
if component._component_obj is None:
continue
if isinstance(component, StackedEnsembleBase):
continue
obj_class = component._component_obj.__class__.__name__
module = component._component_obj.__module__
importlib.import_module(module, obj_class)
patched = module + "." + obj_class + ".__init__"
if component_class == LabelEncoder:
# scikit-learn's LabelEncoder found in different module than where we import from
patched = module[: module.rindex(".")] + "." + obj_class + ".__init__"
def all_init(self, *args, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
with patch(patched, new=all_init) as _:
component = component_class(test_arg="test")
component_with_different_kwargs = component_class(diff_test_arg="test")
assert component.parameters["test_arg"] == "test"
if not isinstance(component, (PolynomialDetrender, LabelEncoder)):
assert component._component_obj.test_arg == "test"
# Test equality of different components with same or different kwargs
assert component == component_class(test_arg="test")
assert component != component_with_different_kwargs
def test_component_has_random_seed():
for component_class in all_components():
params = inspect.signature(component_class.__init__).parameters
assert "random_seed" in params
def test_transformer_transform_output_type(X_y_binary):
X_np, y_np = X_y_binary
assert isinstance(X_np, np.ndarray)
assert isinstance(y_np, np.ndarray)
y_list = list(y_np)
X_df_no_col_names = pd.DataFrame(X_np)
range_index = pd.RangeIndex(start=0, stop=X_np.shape[1], step=1)
X_df_with_col_names = pd.DataFrame(
X_np, columns=["x" + str(i) for i in range(X_np.shape[1])]
)
y_series_no_name = pd.Series(y_np)
y_series_with_name = pd.Series(y_np, name="target")
datatype_combos = [
(X_np, y_np, range_index),
(X_np, y_list, range_index),
(X_df_no_col_names, y_series_no_name, range_index),
(X_df_with_col_names, y_series_with_name, X_df_with_col_names.columns),
]
for component_class in _all_transformers():
if component_class in [PolynomialDetrender, LogTransformer, LabelEncoder]:
# Skipping because these tests are handled in their respective test files
continue
print("Testing transformer {}".format(component_class.name))
for X, y, X_cols_expected in datatype_combos:
print(
'Checking output of transform for transformer "{}" on X type {} cols {}, y type {} name {}'.format(
component_class.name,
type(X),
X.columns if isinstance(X, pd.DataFrame) else None,
type(y),
y.name if isinstance(y, pd.Series) else None,
)
)
component = component_class()
# SMOTE will throw an error if we pass a ratio lower than the current class balance
if "Oversampler" == component_class.name:
# we cover this case in test_oversamplers
continue
elif component_class == TimeSeriesFeaturizer:
# covered in test_delayed_feature_transformer.py
continue
component.fit(X, y=y)
transform_output = component.transform(X, y=y)
if component.modifies_target:
assert isinstance(transform_output[0], pd.DataFrame)
assert isinstance(transform_output[1], pd.Series)
else:
assert isinstance(transform_output, pd.DataFrame)
if isinstance(component, SelectColumns) or isinstance(
component, SelectByType
):
assert transform_output.shape == (X.shape[0], 0)
elif isinstance(component, PCA) or isinstance(
component, LinearDiscriminantAnalysis
):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] <= X.shape[1]
elif isinstance(component, DFSTransformer):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] >= X.shape[1]
elif component.modifies_target:
assert transform_output[0].shape == X.shape
assert transform_output[1].shape[0] == X.shape[0]
assert len(transform_output[1].shape) == 1
else:
assert transform_output.shape == X.shape
assert list(transform_output.columns) == list(X_cols_expected)
transform_output = component.fit_transform(X, y=y)
if component.modifies_target:
assert isinstance(transform_output[0], pd.DataFrame)
assert isinstance(transform_output[1], pd.Series)
else:
assert isinstance(transform_output, pd.DataFrame)
if isinstance(component, SelectColumns) or isinstance(
component, SelectByType
):
assert transform_output.shape == (X.shape[0], 0)
elif isinstance(component, PCA) or isinstance(
component, LinearDiscriminantAnalysis
):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] <= X.shape[1]
elif isinstance(component, DFSTransformer):
assert transform_output.shape[0] == X.shape[0]
assert transform_output.shape[1] >= X.shape[1]
elif component.modifies_target:
assert transform_output[0].shape == X.shape
assert transform_output[1].shape[0] == X.shape[0]
assert len(transform_output[1].shape) == 1
else:
assert transform_output.shape == X.shape
assert list(transform_output.columns) == list(X_cols_expected)
@pytest.mark.parametrize(
"cls",
[
cls
for cls in all_components()
if cls
not in [
StackedEnsembleClassifier,
StackedEnsembleRegressor,
]
],
)
def test_default_parameters(cls):
assert (
cls.default_parameters == cls().parameters
), f"{cls.__name__}'s default parameters don't match __init__."
@pytest.mark.parametrize("cls", [cls for cls in all_components()])
def test_default_parameters_raise_no_warnings(cls):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cls()
assert len(w) == 0
def test_estimator_check_for_fit(X_y_binary):
class MockEstimatorObj:
def __init__(self):
pass
def fit(self, X, y):
return self
def predict(self, X):
series = pd.Series([0] * len(X))
series.ww.init()
return series
def predict_proba(self, X):
df = pd.DataFrame({0: [0] * len(X)})
df.ww.init()
return df
class MockEstimator(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
def __init__(self, parameters=None, component_obj=None, random_seed=0):
est = MockEstimatorObj()
super().__init__(
parameters=parameters, component_obj=est, random_seed=random_seed
)
X, y = X_y_binary
est = MockEstimator()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
est.predict(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
est.predict_proba(X)
est.fit(X, y)
est.predict(X)
est.predict_proba(X)
def test_transformer_check_for_fit(X_y_binary):
class MockTransformerObj:
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
def fit_transform(self, X, y=None):
return X
class MockTransformer(Transformer):
name = "Mock Transformer"
def __init__(self, parameters=None, component_obj=None, random_seed=0):
transformer = MockTransformerObj()
super().__init__(
parameters=parameters,
component_obj=transformer,
random_seed=random_seed,
)
def transform(self, X, y=None):
return X
def inverse_transform(self, X, y=None):
return X, y
X, y = X_y_binary
trans = MockTransformer()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
trans.transform(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
trans.inverse_transform(X, y)
trans.fit(X, y)
trans.transform(X)
trans.fit_transform(X, y)
trans.inverse_transform(X, y)
def test_transformer_check_for_fit_with_overrides(X_y_binary):
class MockTransformerWithOverride(Transformer):
name = "Mock Transformer"
def fit(self, X, y):
return self
def transform(self, X, y=None):
df = pd.DataFrame()
df.ww.init()
return df
class MockTransformerWithOverrideSubclass(Transformer):
name = "Mock Transformer Subclass"
def fit(self, X, y):
return self
def transform(self, X, y=None):
df = pd.DataFrame()
df.ww.init()
return df
X, y = X_y_binary
transformer = MockTransformerWithOverride()
transformer_subclass = MockTransformerWithOverrideSubclass()
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
transformer.transform(X)
with pytest.raises(ComponentNotYetFittedError, match="You must fit"):
transformer_subclass.transform(X)
transformer.fit(X, y)
transformer.transform(X)
transformer_subclass.fit(X, y)
transformer_subclass.transform(X)
def test_all_transformers_needs_fitting():
for component_class in _all_transformers() + _all_estimators():
if component_class.__name__ in [
"DropColumns",
"SelectColumns",
"SelectByType",
]:
assert not component_class.needs_fitting
else:
assert component_class.needs_fitting
def test_all_transformers_check_fit(X_y_binary, ts_data_binary):
for component_class in _all_transformers():
X, y = X_y_binary
if not component_class.needs_fitting:
continue
component = component_class()
# SMOTE will throw errors if we call it but cannot oversample
if "Oversampler" == component_class.name:
component = component_class(sampling_ratio=1)
elif component_class == TimeSeriesFeaturizer:
X, y = ts_data_binary
component = component_class(date_index="date")
with pytest.raises(
ComponentNotYetFittedError, match=f"You must fit {component_class.__name__}"
):
component.transform(X, y)
component.fit(X, y)
component.transform(X, y)
component = component_class()
if "Oversampler" == component_class.name:
component = component_class(sampling_ratio=1)
elif component_class == TimeSeriesFeaturizer:
component = component_class(date_index="date")
component.fit_transform(X, y)
component.transform(X, y)
def test_all_estimators_check_fit(
X_y_binary, ts_data, test_estimator_needs_fitting_false, helper_functions
):
estimators_to_check = [
estimator
for estimator in _all_estimators()
if estimator
not in [
StackedEnsembleClassifier,
StackedEnsembleRegressor,
TimeSeriesBaselineEstimator,
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
VowpalWabbitRegressor,
]
] + [test_estimator_needs_fitting_false]
for component_class in estimators_to_check:
if not component_class.needs_fitting:
continue
if (
ProblemTypes.TIME_SERIES_REGRESSION
in component_class.supported_problem_types
):
X, y = ts_data
else:
X, y = X_y_binary
component = helper_functions.safe_init_component_with_njobs_1(component_class)
with patch.object(component, "_component_obj") as mock_component_obj:
with patch.object(
mock_component_obj, "predict"
) as mock_component_obj_predict:
mock_component_obj_predict.return_value = pd.Series([0] * len(y))
if "Prophet" in component.name:
mock_component_obj_predict.return_value = {
"yhat": pd.Series([0] * len(y)),
"ds": pd.Series([0] * len(y)),
}
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.predict(X)
if (
ProblemTypes.BINARY in component.supported_problem_types
or ProblemTypes.MULTICLASS in component.supported_problem_types
):
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.predict_proba(X)
with pytest.raises(
ComponentNotYetFittedError,
match=f"You must fit {component_class.__name__}",
):
component.feature_importance
component.fit(X, y)
if (
ProblemTypes.BINARY in component.supported_problem_types
or ProblemTypes.MULTICLASS in component.supported_problem_types
):
component.predict_proba(X)
component.predict(X)
component.feature_importance
@pytest.mark.parametrize("data_type", ["li", "np", "pd", "ww"])
def test_all_transformers_check_fit_input_type(
data_type, X_y_binary, make_data_type, ts_data_binary
):
for component_class in _all_transformers():
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
kwargs = {}
if not component_class.needs_fitting or "Oversampler" in component_class.name:
# since SMOTE determines categorical columns through the logical type, it can only accept ww data
continue
if component_class == TimeSeriesFeaturizer:
X, y = ts_data_binary
kwargs = {"date_index": "date"}
component = component_class(**kwargs)
component.fit(X, y)
def test_no_fitting_required_components(
X_y_binary, test_estimator_needs_fitting_false, helper_functions
):
X, y = X_y_binary
for component_class in all_components() + [test_estimator_needs_fitting_false]:
if not component_class.needs_fitting:
component = helper_functions.safe_init_component_with_njobs_1(
component_class
)
if issubclass(component_class, Estimator):
component.predict(X)
else:
component.transform(X, y)
def test_serialization(X_y_binary, ts_data, tmpdir, helper_functions):
path = os.path.join(str(tmpdir), "component.pkl")
requires_date_index = [ARIMARegressor, ProphetRegressor, TimeSeriesFeaturizer]
for component_class in all_components():
print("Testing serialization of component {}".format(component_class.name))
component = helper_functions.safe_init_component_with_njobs_1(component_class)
if component_class in requires_date_index:
component = component_class(date_index="date")
X, y = ts_data
else:
X, y = X_y_binary
component.fit(X, y)
for pickle_protocol in range(cloudpickle.DEFAULT_PROTOCOL + 1):
component.save(path, pickle_protocol=pickle_protocol)
loaded_component = ComponentBase.load(path)
assert component.parameters == loaded_component.parameters
assert component.describe(return_dict=True) == loaded_component.describe(
return_dict=True
)
if issubclass(component_class, Estimator) and not (
isinstance(
component,
(
StackedEnsembleClassifier,
StackedEnsembleRegressor,
VowpalWabbitBinaryClassifier,
VowpalWabbitMulticlassClassifier,
VowpalWabbitRegressor,
),
)
):
assert (
component.feature_importance == loaded_component.feature_importance
).all()
@patch("cloudpickle.dump")
def test_serialization_protocol(mock_cloudpickle_dump, tmpdir):
path = os.path.join(str(tmpdir), "pipe.pkl")
component = LogisticRegressionClassifier()
component.save(path)
assert len(mock_cloudpickle_dump.call_args_list) == 1
assert (
mock_cloudpickle_dump.call_args_list[0][1]["protocol"]
== cloudpickle.DEFAULT_PROTOCOL
)
mock_cloudpickle_dump.reset_mock()
component.save(path, pickle_protocol=42)
assert len(mock_cloudpickle_dump.call_args_list) == 1
assert mock_cloudpickle_dump.call_args_list[0][1]["protocol"] == 42
@pytest.mark.parametrize("estimator_class", _all_estimators())
def test_estimators_accept_all_kwargs(
estimator_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
estimator = estimator_class()
if estimator._component_obj is None:
pytest.skip(
f"Skipping {estimator_class} because does not have component object."
)
if estimator_class.model_family == ModelFamily.ENSEMBLE:
params = estimator.parameters
elif estimator_class.model_family == ModelFamily.PROPHET:
params = estimator.get_params()
else:
params = estimator._component_obj.get_params()
if "random_state" in params:
del params["random_state"]
estimator_class(**params)
def test_component_equality_different_classes():
    # Tests that two distinct classes with identical attributes are not equal
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
class MockComponentWithADifferentName(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
assert MockComponent() != MockComponentWithADifferentName()
def test_component_equality_subclasses():
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
class MockEstimatorSubclass(MockComponent):
pass
assert MockComponent() != MockEstimatorSubclass()
def test_component_equality():
class MockComponent(ComponentBase):
name = "Mock Component"
model_family = ModelFamily.NONE
modifies_features = True
modifies_target = False
training_only = False
def __init__(self, param_1=0, param_2=0, random_seed=0, **kwargs):
parameters = {"param_1": param_1, "param_2": param_2}
parameters.update(kwargs)
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y=None):
return self
# Test self-equality
mock_component = MockComponent()
assert mock_component == mock_component
# Test defaults
assert MockComponent() == MockComponent()
# Test random_state and random_seed
assert MockComponent(random_seed=10) == MockComponent(random_seed=10)
assert MockComponent(random_seed=10) != MockComponent(random_seed=0)
# Test parameters
assert MockComponent(1, 2) == MockComponent(1, 2)
assert MockComponent(1, 2) != MockComponent(1, 0)
assert MockComponent(0, 2) != MockComponent(1, 2)
# Test fitted equality
mock_component.fit(pd.DataFrame({}))
assert mock_component != MockComponent()
@pytest.mark.parametrize("component_class", all_components())
def test_component_equality_all_components(
component_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
component = component_class()
parameters = component.parameters
equal_component = component_class(**parameters)
assert component == equal_component
def test_component_equality_with_subclasses(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
mock_component = MockComponent()
mock_estimator = MockEstimator()
mock_transformer = MockTransformer()
assert mock_component != mock_estimator
assert mock_component != mock_transformer
assert mock_estimator != mock_component
assert mock_estimator != mock_transformer
assert mock_transformer != mock_component
assert mock_transformer != mock_estimator
def test_mock_component_str(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
assert str(MockComponent()) == "Mock Component"
assert str(MockEstimator()) == "Mock Estimator"
assert str(MockTransformer()) == "Mock Transformer"
def test_mock_component_repr():
component = MockFitComponent()
assert repr(component) == "MockFitComponent(param_a=2, param_b=10)"
component_with_params = MockFitComponent(param_a=29, param_b=None, random_seed=42)
assert repr(component_with_params) == "MockFitComponent(param_a=29, param_b=None)"
component_with_nan = MockFitComponent(param_a=np.nan, param_b=float("nan"))
assert (
repr(component_with_nan) == "MockFitComponent(param_a=np.nan, param_b=np.nan)"
)
component_with_inf = MockFitComponent(param_a=np.inf, param_b=float("-inf"))
assert (
repr(component_with_inf)
== "MockFitComponent(param_a=float('inf'), param_b=float('-inf'))"
)
@pytest.mark.parametrize("component_class", all_components())
def test_component_str(
component_class,
logistic_regression_binary_pipeline_class,
linear_regression_pipeline_class,
):
component = component_class()
assert str(component) == component.name
@pytest.mark.parametrize(
"categorical",
[
{
"type": Categorical(["mean", "median", "mode"]),
"categories": Categorical(["blue", "green"]),
},
{"type": ["mean", "median", "mode"], "categories": ["blue", "green"]},
],
)
def test_categorical_hyperparameters(X_y_binary, categorical):
X, y = X_y_binary
class MockEstimator:
def fit(self, X, y):
pass
class MockComponent(Estimator):
name = "Mock Estimator"
model_family = ModelFamily.LINEAR_MODEL
supported_problem_types = ["binary"]
hyperparameter_ranges = categorical
def __init__(self, agg_type, category="green"):
parameters = {"type": agg_type, "categories": category}
est = MockEstimator()
super().__init__(parameters=parameters, component_obj=est, random_seed=0)
assert MockComponent(agg_type="mean").fit(X, y)
assert MockComponent(agg_type="moat", category="blue").fit(X, y)
def test_generate_code_errors():
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(BinaryClassificationPipeline([RandomForestClassifier]))
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(LinearRegressor)
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(Imputer)
with pytest.raises(ValueError, match="Element must be a component instance"):
generate_component_code(ComponentBase)
def test_generate_code():
expected_code = (
"from evalml.pipelines.components.estimators.classifiers.logistic_regression_classifier import LogisticRegressionClassifier"
"\n\nlogisticRegressionClassifier = LogisticRegressionClassifier(**{'penalty': 'l2', 'C': 1.0, 'n_jobs': -1, 'multi_class': 'auto', 'solver': 'lbfgs'})"
)
component_code = generate_component_code(LogisticRegressionClassifier())
assert component_code == expected_code
expected_code = (
"from evalml.pipelines.components.estimators.regressors.et_regressor import ExtraTreesRegressor"
"\n\nextraTreesRegressor = ExtraTreesRegressor(**{'n_estimators': 50, 'max_features': 'auto', 'max_depth': 6, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_jobs': -1})"
)
component_code = generate_component_code(ExtraTreesRegressor(n_estimators=50))
assert component_code == expected_code
expected_code = (
"from evalml.pipelines.components.transformers.imputers.imputer import Imputer"
"\n\nimputer = Imputer(**{'categorical_impute_strategy': 'most_frequent', 'numeric_impute_strategy': 'mean', 'categorical_fill_value': None, 'numeric_fill_value': None})"
)
component_code = generate_component_code(Imputer())
assert component_code == expected_code
def test_generate_code_custom(test_classes):
MockComponent, MockEstimator, MockTransformer = test_classes
expected_code = "mockComponent = MockComponent(**{})"
component_code = generate_component_code(MockComponent())
assert component_code == expected_code
expected_code = "mockEstimator = MockEstimator(**{})"
component_code = generate_component_code(MockEstimator())
assert component_code == expected_code
expected_code = "mockTransformer = MockTransformer(**{})"
component_code = generate_component_code(MockTransformer())
assert component_code == expected_code
@pytest.mark.parametrize("transformer_class", _all_transformers())
@pytest.mark.parametrize("use_custom_index", [True, False])
def test_transformer_fit_and_transform_respect_custom_indices(
use_custom_index, transformer_class, X_y_binary, ts_data_binary
):
check_names = True
if transformer_class == DFSTransformer:
check_names = False
if use_custom_index:
pytest.skip("The DFSTransformer changes the index so we skip it.")
if transformer_class == PolynomialDetrender:
pytest.skip(
"Skipping PolynomialDetrender because we test that it respects custom indices in "
"test_polynomial_detrender.py"
)
X, y = X_y_binary
kwargs = {}
if transformer_class == TimeSeriesFeaturizer:
kwargs.update({"date_index": "date"})
X, y = ts_data_binary
X = pd.DataFrame(X)
y = pd.Series(y)
if use_custom_index:
custom_index = range(100, 100 + X.shape[0])
X.index = custom_index
y.index = custom_index
X_original_index = X.index.copy()
y_original_index = y.index.copy()
transformer = transformer_class(**kwargs)
transformer.fit(X, y)
pd.testing.assert_index_equal(X.index, X_original_index)
pd.testing.assert_index_equal(y.index, y_original_index)
if isinstance(transformer, BaseSampler):
return
elif transformer_class.modifies_target:
X_t, y_t = transformer.transform(X, y)
pd.testing.assert_index_equal(
y_t.index, y_original_index, check_names=check_names
)
else:
X_t = transformer.transform(X, y)
pd.testing.assert_index_equal(
y.index, y_original_index, check_names=check_names
)
if hasattr(transformer_class, "inverse_transform"):
y_inv = transformer.inverse_transform(y)
pd.testing.assert_index_equal(
y_inv.index, y_original_index, check_names=check_names
)
pd.testing.assert_index_equal(X_t.index, X_original_index, check_names=check_names)
@pytest.mark.parametrize("estimator_class", _all_estimators())
@pytest.mark.parametrize("use_custom_index", [True, False])
def test_estimator_fit_respects_custom_indices(
use_custom_index,
estimator_class,
X_y_binary,
X_y_regression,
ts_data,
helper_functions,
):
input_pipelines = []
supported_problem_types = estimator_class.supported_problem_types
ts_problem = False
if ProblemTypes.REGRESSION in supported_problem_types:
X, y = X_y_regression
elif ProblemTypes.TIME_SERIES_REGRESSION in supported_problem_types:
X, y = ts_data
ts_problem = True
else:
X, y = X_y_binary
X = pd.DataFrame(X)
y = pd.Series(y)
if use_custom_index and ts_problem:
X.index = pd.date_range("2020-10-01", "2020-10-31")
y.index = pd.date_range("2020-10-01", "2020-10-31")
elif use_custom_index and not ts_problem:
custom_index = range(100, 100 + X.shape[0])
X.index = custom_index
y.index = custom_index
X_original_index = X.index.copy()
y_original_index = y.index.copy()
if input_pipelines:
estimator = estimator_class(n_jobs=1, input_pipelines=input_pipelines)
else:
estimator = helper_functions.safe_init_component_with_njobs_1(estimator_class)
estimator.fit(X, y)
pd.testing.assert_index_equal(X.index, X_original_index)
    pd.testing.assert_index_equal(y.index, y_original_index)
"""
This module contains methods that will be applied inside of the apply call on the freshly converted
Spark DF that is now a list of dicts.
These methods are meant to be applied in the internal calls of the metadata extraction.
They expect dictionaries which represent the metadata field extracted from Spark NLP annotators.
"""
import numpy as np
from pyspark.sql import Row as PysparkRow
from nlu.pipe.extractors.extractor_base_data_classes import *
from functools import reduce, partial
import pandas as pd
def extract_pyspark_rows(r: pd.Series, ) -> pd.Series:
""" Convert pyspark.sql.Row[Annotation] to List(Dict[str,str]) objects. Except for key=metadata in dict, this element in the Dict which is [str,Dict[str,str]]
Checks if elements are of type list and wether they contain Pyspark Rows.
If PysparkRow, call .asDict() on every row element to generate the dicts
First method that runs after toPandas() call
"""
if isinstance(r, str):
return r
elif isinstance(r, list):
if len(r) == 0:
return r
elif isinstance(r[0], PysparkRow):
pyspark_row_to_list = lambda l: l.asDict()
return list(map(pyspark_row_to_list, r))
return r
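# Illustrative only: a minimal sketch (not part of the original module) of how
# extract_pyspark_rows unpacks a list of pyspark Rows; the toy field values below are assumptions.
def _demo_extract_pyspark_rows():
    toy_row = PysparkRow(
        annotatorType='token', begin=0, end=4, result='Hello',
        metadata={'sentence': '0'}, embeddings=[],
    )
    # Each PysparkRow is converted via .asDict(), e.g. converted[0]['result'] == 'Hello'
    converted = extract_pyspark_rows([toy_row])
    return converted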
def extract_pyarrow_rows(r: pd.Series, ) -> pd.Series:
""" Convert pyspark.sql.Row[Annotation] to List(Dict[str,str]) objects. Except for key=metadata in dict, this element in the Dict which is [str,Dict[str,str]]
Checks if elements are of type list and wether they contain Pyspark Rows.
If PysparkRow, call .asDict() on every row element to generate the dicts
First method that runs after toPandas() call
"""
if isinstance(r, str):
return r
elif isinstance(r, np.ndarray):
if len(r) == 0:
return r
elif isinstance(r[0], dict) and 'annotatorType' in r[0].keys():
r[0]['metadata'] = dict(r[0]['metadata'])
return r
return r
def extract_base_sparknlp_features(row: pd.Series, configs: SparkNLPExtractorConfig) -> dict:
"""
    Extract base features common to all Spark NLP annotators:
    Begin/End/Embedding/Metadata/Result, except for the blacklisted features.
    Setting pop to True for a certain field will return only the first element of that field's list of elements. Useful if that field always has exactly one result, like many classifiers.
    Expects a list with Token Annotator outputs from extract_pyspark_rows(), e.g.
[{'annotatorType': 'token',
'begin': 0,
'embeddings': [],
'end': 4,
'metadata': {'sentence': '0'},
'result': 'Hello'
}]
row = pyspark.row
or
[
{'annotatorType': 'language',
'begin': 0,
'embeddings': [],
'end': 57,
'metadata': {'bg': '0.0',
'sentence': '0',
'sl': '5.2462015E-24',
'sv': '2.5977007E-25'},
'result': 'en'}
]
returns a DICT
"""
unpack_dict_list = lambda d, k: d[k]
unpack_begin = lambda x: unpack_dict_list(x, 'begin')
unpack_end = lambda x: unpack_dict_list(x, 'end')
unpack_annotator_type = lambda x: unpack_dict_list(x, 'annotatorType')
unpack_result = lambda x: unpack_dict_list(x, 'result')
unpack_embeddings = lambda x: unpack_dict_list(x, 'embeddings')
    # Either extract the list of annotation results and put them in a dict with the corresponding key name, or return an empty dict {} for easy merging in the return
annotator_types = {configs.output_col_prefix + '_types': list(
map(unpack_annotator_type, row))} if configs.get_annotator_type else {}
    # Same logic as above, but we check whether to pop or not and either evaluate the map result with list() or just next()
if configs.pop_result_list:
results = {configs.output_col_prefix + '_results': next(map(unpack_result, row))} if configs.get_result else {}
else:
results = {configs.output_col_prefix + '_results': list(map(unpack_result, row))} if configs.get_result else {}
if configs.pop_begin_list:
beginnings = {configs.output_col_prefix + '_beginnings': next(
map(unpack_begin, row))} if configs.get_begin or configs.get_positions else {}
else:
beginnings = {configs.output_col_prefix + '_beginnings': list(
map(unpack_begin, row))} if configs.get_begin or configs.get_positions else {}
if configs.pop_end_list:
endings = {configs.output_col_prefix + '_endings': next(
map(unpack_end, row))} if configs.get_end or configs.get_positions else {}
else:
endings = {configs.output_col_prefix + '_endings': list(
map(unpack_end, row))} if configs.get_end or configs.get_positions else {}
if configs.pop_embeds_list:
embeddings = {
configs.output_col_prefix + '_embeddings': next(map(unpack_embeddings, row))} if configs.get_embeds else {}
else:
embeddings = {
configs.output_col_prefix + '_embeddings': list(map(unpack_embeddings, row))} if configs.get_embeds else {}
return {**beginnings, **endings, **results, **annotator_types, **embeddings} # Merge dicts
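# Illustrative only: a rough sketch of calling extract_base_sparknlp_features with a
# stand-in config. The real SparkNLPExtractorConfig has more fields; the SimpleNamespace
# below only mimics the attributes this function reads and is an assumption.
def _demo_extract_base_sparknlp_features():
    from types import SimpleNamespace
    toy_config = SimpleNamespace(
        output_col_prefix='token',
        get_annotator_type=False,
        get_result=True, pop_result_list=False,
        get_begin=False, get_end=False, get_positions=False,
        pop_begin_list=False, pop_end_list=False,
        get_embeds=False, pop_embeds_list=False,
    )
    toy_row = [{'annotatorType': 'token', 'begin': 0, 'embeddings': [], 'end': 4,
                'metadata': {'sentence': '0'}, 'result': 'Hello'}]
    # Expected result: {'token_results': ['Hello']}
    return extract_base_sparknlp_features(toy_row, toy_config)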
def extract_sparknlp_metadata(row: pd.Series, configs: SparkNLPExtractorConfig) -> dict:
"""
    Extract the metadata dictionaries from a list of Spark NLP annotator outputs,
    i.e. the output of extract_pyspark_rows().
    Can either use a WHITE_LIST or a BLACK_LIST, or get ALL metadata.
    For WHITE_LIST != [], only metadata keys/values are kept for which the keys are contained in the white list.
    For WHITE_LIST == [] and BLACK_LIST != [], all metadata keys/values are returned which are not on the black list.
    If WHITE_LIST is not [], the BLACK_LIST will be ignored.
    Returns one dict which will be merged into a pd.Series by the extractor calling this extractor for .apply() in pythonify.
"""
if len(row) == 0: return {}
unpack_dict_list = lambda d, k: d[k]
    # extract the list of metadata dictionaries (all dicts should have the same keys)
unpack_metadata_to_dict_list = lambda x: unpack_dict_list(x, 'metadata')
metadatas_dict_list = list(map(unpack_metadata_to_dict_list, row))
# extract keys, which should all be equal in all rows
if configs.get_full_meta:
keys_in_metadata = list(metadatas_dict_list[0].keys()) if len(metadatas_dict_list) > 0 else []
elif len(configs.meta_white_list) != 0:
keys_in_metadata = [k for k in metadatas_dict_list[0].keys() if k in configs.meta_white_list]
elif len(configs.meta_black_list) != 0:
keys_in_metadata = [k for k in metadatas_dict_list[0].keys() if k not in configs.meta_black_list]
else:
keys_in_metadata = []
    # decorate lambda with the key to extract, equal to: def decorate_f(key): return lambda x, y: x + [y[key]]
    # For a list of dicts which all have the same keys, returns a list of all the values for one key across all the dicts
extract_val_from_dic_list_to_list = lambda key: lambda x, y: x + [y[key]]
    # List of lambda expressions, one for each key to be extracted. (TODO blacklisting?)
dict_value_extractors = list(map(extract_val_from_dic_list_to_list, keys_in_metadata))
    # Reduce a list of dicts with the same structure and a common key to a list of values for that key. Leverages a closure over metadatas_dict_list
reduce_dict_list_to_values = lambda t: reduce(t, metadatas_dict_list, [])
    # List of lists, where each list corresponds to all values for one key in the previous dict list
meta_values_list = list(map(reduce_dict_list_to_values, dict_value_extractors)) # , metadatas_dict_list,[] ))
# add prefix to key and zip with values for final dict result
result = dict(
zip(list(map(lambda x: 'meta_' + configs.output_col_prefix + '_' + x, keys_in_metadata)), meta_values_list))
return result
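# Illustrative only: a sketch of the metadata extraction with a stand-in config whose
# attribute names mirror what extract_sparknlp_metadata reads; this is not the real
# SparkNLPExtractorConfig constructor.
def _demo_extract_sparknlp_metadata():
    from types import SimpleNamespace
    toy_config = SimpleNamespace(
        output_col_prefix='lang',
        get_full_meta=False,
        meta_white_list=['sentence'],
        meta_black_list=[],
    )
    toy_row = [{'annotatorType': 'language', 'begin': 0, 'end': 57, 'embeddings': [],
                'metadata': {'sentence': '0', 'bg': '0.0'}, 'result': 'en'}]
    # With the white list, only the 'sentence' key survives.
    # Expected result: {'meta_lang_sentence': ['0']}
    return extract_sparknlp_metadata(toy_row, toy_config)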
def extract_master(row: pd.Series, configs: SparkNLPExtractorConfig) -> pd.Series:
"""
    Re-usable base extractor for simple annotators like Document/Token/etc.
    extract_universal/?/Better name?
    row = a list of Spark NLP annotations as dictionaries
"""
    if len(row) == 0: return pd.Series({})
"""
Compute the statistical impact of features given a trained estimator
"""
from scipy.stats.mstats import mquantiles
import numpy
import pandas
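# FeatureImpactError is raised below but its definition is not part of this excerpt;
# a minimal stand-in definition is assumed here so the module is self-contained.
class FeatureImpactError(Exception):
    pass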
def averaged_impact(impact, normalize=True):
"""
Computes the averaged impact across all quantiles for each feature
:param impact: Array-like object of shape [n_quantiles, n_features].
This should be the return value of FeatureImpact.compute_impact()
    :param normalize: Whether to normalize the averaged impacts such that
        the impacts sum up to one
:returns: The averaged impact as a pandas.Series of shape [n_features]
"""
impact = pandas.DataFrame(impact)
average = pandas.Series(index=impact.columns, dtype=float)
for col in impact:
average[col] = impact[col].mean()
if normalize:
average /= average.sum()
return average
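# Illustrative only: a small sketch of averaged_impact on a hand-made impact table,
# assuming two features and three quantiles; the values are arbitrary.
def _demo_averaged_impact():
    toy_impact = pandas.DataFrame({'feat_a': [0.2, 0.4, 0.6], 'feat_b': [0.1, 0.1, 0.1]})
    # Column means are 0.4 and 0.1; normalized so they sum to one -> 0.8 and 0.2
    return averaged_impact(toy_impact, normalize=True)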
class FeatureImpact(object):
"""
Compute the statistical impact of features given a trained estimator
"""
def __init__(self):
self._quantiles = None
@property
def quantiles(self):
"""
The quantiles corresponding to the features
:returns: pandas.DataFrame of shape [n_quantiles, n_features] or None
"""
return self._quantiles
@quantiles.setter
def quantiles(self, value):
"""
The quantiles corresponding to the features
:param value: Array-like object of shape [n_quantiles, n_features]
"""
self._quantiles = pandas.DataFrame(value, dtype=float)
def make_quantiles(self, X, n_quantiles=9):
"""
Generates the quantiles for each feature in X. The quantiles for one
feature are computed such that the area between quantiles is the
same throughout. The default quantiles are computed at the following
        probabilities: 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
The actual quantiles being used are the values that are closest to the
computed quantiles. This ensures only values are used that are actually
part of the features, particularly important for distributions with
multiple peaks (e.g. categorical features).
:param X: Features. Array-like object of shape [n_samples, n_features]
:param n_quantiles: The number of quantiles to compute
"""
if n_quantiles < 1:
raise FeatureImpactError("n_quantiles must be at least one.")
X = pandas.DataFrame(X)
probs = numpy.linspace(0.0, 1.0, n_quantiles + 2)[1:-1]
self._quantiles = pandas.DataFrame(dtype=float)
for col in X:
feature = X[col].dropna().values
values = []
for quantile in mquantiles(feature, probs):
closest = numpy.abs(feature - quantile).argmin()
values.append(feature[closest])
self._quantiles[col] = values
def compute_impact(self, estimator, X, method='predict'):
"""
Computes the statistical impact of each feature based on the mean
variation of the difference between perturbed and original predictions.
The impact is always >= 0.
:param estimator: A trained estimator implementing the given predict `method`.
It is assumed that the predict method does not change its input.
:param X: Features. Array-like object of shape [n_samples, n_features]
:param method: The predict method to call on `estimator`.
:returns: Impact as a pandas.DataFrame of shape [n_quantiles, n_features]
"""
if self._quantiles is None:
raise FeatureImpactError("make_quantiles() must be called first "
"or the quantiles explicitly assigned")
if not hasattr(estimator, method):
raise FeatureImpactError("estimator does not implement {}()".format(method))
X = pandas.DataFrame(X)
y = getattr(estimator, method)(X)
impact = pandas.DataFrame(dtype=float)
for feature in X:
orig_feat = pandas.Series(X[feature], copy=True)
x_std = orig_feat.std(skipna=True)
if x_std > 0.0:
imp = []
for quantile, count in self._quantiles[feature].value_counts().iteritems():
X[feature] = quantile
y_star = getattr(estimator, method)(X)
                    diff_std = pandas.Series(y - y_star)